I'm announcing the release of the 6.14.2 kernel.
All users of the 6.14 kernel series must upgrade.
The updated 6.14.y git tree can be found at:
        git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.14.y
and can be browsed at the normal kernel.org git web browser:
        https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
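For anyone tracking the stable tree locally, one way to pull in this release with plain git is, for example (the remote name "stable" below is an arbitrary choice; the URL and branch name are the ones given above):

        # one-time setup: add the stable tree as a remote
        git remote add stable git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
        # fetch the 6.14.y branch and create a local branch at its tip
        git fetch stable linux-6.14.y
        git checkout -b linux-6.14.y stable/linux-6.14.y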
thanks,
greg k-h
------------
Documentation/devicetree/bindings/vendor-prefixes.yaml | 2 Documentation/netlink/specs/netdev.yaml | 4 Documentation/netlink/specs/rt_route.yaml | 180 - Documentation/networking/xsk-tx-metadata.rst | 62 Makefile | 2 arch/arm/Kconfig | 2 arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts | 3 arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi | 2 arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts | 5 arch/arm/include/asm/vmlinux.lds.h | 12 arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi | 39 arch/arm64/boot/dts/freescale/imx8mp.dtsi | 7 arch/arm64/boot/dts/mediatek/mt6359.dtsi | 3 arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi | 2 arch/arm64/boot/dts/mediatek/mt8173.dtsi | 6 arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts | 1033 --------- arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi | 1046 ++++++++++ arch/arm64/boot/dts/renesas/r8a774c0.dtsi | 4 arch/arm64/boot/dts/renesas/r8a77990.dtsi | 4 arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts | 2 arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts | 4 arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi | 2 arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts | 14 arch/arm64/boot/dts/rockchip/rk356x-base.dtsi | 25 arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts | 3 arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi | 2 arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts | 2 arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi | 6 arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi | 8 arch/arm64/boot/dts/ti/k3-am62p-main.dtsi | 26 arch/arm64/boot/dts/ti/k3-j722s-evm.dts | 2 arch/arm64/boot/dts/ti/k3-j722s-main.dtsi | 15 arch/arm64/include/asm/mem_encrypt.h | 11 arch/arm64/kernel/compat_alignment.c | 2 arch/loongarch/Kconfig | 4 arch/loongarch/include/asm/cache.h | 2 arch/loongarch/include/asm/irq.h | 2 arch/loongarch/include/asm/stacktrace.h | 3 arch/loongarch/include/asm/unwind_hints.h | 10 arch/loongarch/kernel/env.c | 2 arch/loongarch/kernel/kgdb.c | 5 arch/loongarch/net/bpf_jit.c | 12 arch/loongarch/net/bpf_jit.h | 5 arch/m68k/include/asm/processor.h | 14 arch/m68k/sun3/mmu_emu.c | 7 arch/parisc/include/uapi/asm/socket.h | 12 arch/powerpc/configs/mpc885_ads_defconfig | 2 arch/powerpc/crypto/Makefile | 1 arch/powerpc/kexec/relocate_32.S | 7 arch/powerpc/perf/core-book3s.c | 8 arch/powerpc/perf/vpa-pmu.c | 1 arch/powerpc/platforms/cell/spufs/gang.c | 1 arch/powerpc/platforms/cell/spufs/inode.c | 63 arch/powerpc/platforms/cell/spufs/spufs.h | 2 arch/riscv/Kconfig | 2 arch/riscv/errata/Makefile | 6 arch/riscv/include/asm/cpufeature.h | 4 arch/riscv/include/asm/ftrace.h | 4 arch/riscv/kernel/elf_kexec.c | 3 arch/riscv/kernel/mcount.S | 24 arch/riscv/kernel/traps_misaligned.c | 14 arch/riscv/kernel/unaligned_access_speed.c | 91 arch/riscv/kernel/vec-copy-unaligned.S | 2 arch/riscv/kvm/main.c | 4 arch/riscv/kvm/vcpu_pmu.c | 1 arch/riscv/mm/hugetlbpage.c | 76 arch/riscv/purgatory/entry.S | 1 arch/s390/include/asm/io.h | 2 arch/s390/include/asm/pgtable.h | 3 arch/s390/kernel/entry.S | 2 arch/s390/kernel/perf_pai_crypto.c | 3 arch/s390/kernel/perf_pai_ext.c | 3 arch/s390/mm/pgtable.c | 10 arch/um/include/shared/os.h | 1 arch/um/kernel/Makefile | 2 arch/um/kernel/maccess.c | 19 arch/um/os-Linux/process.c | 51 arch/x86/Kconfig | 3 arch/x86/Kconfig.cpu | 2 arch/x86/Makefile.um | 7 arch/x86/coco/tdx/tdx.c | 26 arch/x86/entry/calling.h | 2 arch/x86/entry/common.c | 2 arch/x86/entry/vdso/vdso-layout.lds.S | 2 arch/x86/entry/vdso/vma.c | 2 arch/x86/events/amd/brs.c | 3 arch/x86/events/amd/lbr.c | 3 arch/x86/events/core.c | 5 
arch/x86/events/intel/core.c | 47 arch/x86/events/intel/ds.c | 13 arch/x86/events/intel/lbr.c | 50 arch/x86/events/perf_event.h | 18 arch/x86/hyperv/ivm.c | 2 arch/x86/include/asm/irqflags.h | 40 arch/x86/include/asm/paravirt.h | 20 arch/x86/include/asm/paravirt_types.h | 3 arch/x86/include/asm/tdx.h | 4 arch/x86/include/asm/tlbflush.h | 2 arch/x86/include/asm/vdso/vsyscall.h | 1 arch/x86/kernel/cpu/bus_lock.c | 20 arch/x86/kernel/cpu/mce/severity.c | 11 arch/x86/kernel/cpu/microcode/amd.c | 2 arch/x86/kernel/cpu/resctrl/rdtgroup.c | 3 arch/x86/kernel/dumpstack.c | 5 arch/x86/kernel/fpu/core.c | 6 arch/x86/kernel/paravirt.c | 14 arch/x86/kernel/process.c | 9 arch/x86/kernel/traps.c | 18 arch/x86/kernel/tsc.c | 4 arch/x86/kernel/uprobes.c | 14 arch/x86/kvm/svm/sev.c | 13 arch/x86/kvm/x86.c | 15 arch/x86/lib/copy_user_64.S | 18 arch/x86/mm/mem_encrypt_identity.c | 4 arch/x86/mm/pat/cpa-test.c | 2 arch/x86/mm/pat/memtype.c | 52 block/badblocks.c | 284 -- block/bio.c | 11 block/blk-settings.c | 51 block/blk-throttle.c | 13 crypto/algapi.c | 3 crypto/api.c | 17 crypto/bpf_crypto_skcipher.c | 1 drivers/accel/amdxdna/aie2_smu.c | 2 drivers/acpi/acpi_video.c | 9 drivers/acpi/nfit/core.c | 2 drivers/acpi/platform_profile.c | 26 drivers/acpi/processor_idle.c | 4 drivers/acpi/resource.c | 7 drivers/acpi/x86/utils.c | 3 drivers/ata/libata-core.c | 2 drivers/auxdisplay/Kconfig | 1 drivers/auxdisplay/panel.c | 4 drivers/base/power/main.c | 21 drivers/base/power/runtime.c | 2 drivers/block/null_blk/main.c | 17 drivers/block/ublk_drv.c | 39 drivers/bluetooth/btnxpuart.c | 6 drivers/bluetooth/btusb.c | 2 drivers/bus/qcom-ssc-block-bus.c | 34 drivers/clk/clk-stm32f4.c | 4 drivers/clk/imx/clk-imx8mp-audiomix.c | 6 drivers/clk/meson/g12a.c | 38 drivers/clk/meson/gxbb.c | 14 drivers/clk/mmp/clk-pxa1908-apmu.c | 4 drivers/clk/qcom/gcc-ipq5424.c | 24 drivers/clk/qcom/gcc-msm8953.c | 2 drivers/clk/qcom/gcc-sm8650.c | 4 drivers/clk/qcom/gcc-x1e80100.c | 30 drivers/clk/qcom/mmcc-sdm660.c | 2 drivers/clk/renesas/r9a08g045-cpg.c | 5 drivers/clk/renesas/rzg2l-cpg.c | 13 drivers/clk/renesas/rzg2l-cpg.h | 10 drivers/clk/rockchip/clk-rk3328.c | 2 drivers/clk/samsung/clk.c | 2 drivers/cpufreq/Kconfig.arm | 2 drivers/cpufreq/amd-pstate-trace.h | 46 drivers/cpufreq/amd-pstate.c | 80 drivers/cpufreq/amd-pstate.h | 18 drivers/cpufreq/armada-8k-cpufreq.c | 2 drivers/cpufreq/cpufreq-dt.c | 2 drivers/cpufreq/cpufreq_governor.c | 45 drivers/cpufreq/mediatek-cpufreq-hw.c | 2 drivers/cpufreq/mediatek-cpufreq.c | 2 drivers/cpufreq/mvebu-cpufreq.c | 2 drivers/cpufreq/qcom-cpufreq-hw.c | 2 drivers/cpufreq/qcom-cpufreq-nvmem.c | 8 drivers/cpufreq/scmi-cpufreq.c | 2 drivers/cpufreq/scpi-cpufreq.c | 7 drivers/cpufreq/sun50i-cpufreq-nvmem.c | 6 drivers/cpufreq/virtual-cpufreq.c | 2 drivers/cpuidle/cpuidle-arm.c | 8 drivers/cpuidle/cpuidle-big_little.c | 2 drivers/cpuidle/cpuidle-psci.c | 4 drivers/cpuidle/cpuidle-qcom-spm.c | 2 drivers/cpuidle/cpuidle-riscv-sbi.c | 4 drivers/crypto/hisilicon/sec2/sec.h | 1 drivers/crypto/hisilicon/sec2/sec_crypto.c | 125 - drivers/crypto/intel/iaa/iaa_crypto_main.c | 4 drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 1 drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c | 59 drivers/crypto/nx/nx-common-pseries.c | 37 drivers/crypto/tegra/tegra-se-aes.c | 401 ++- drivers/crypto/tegra/tegra-se-hash.c | 287 +- drivers/crypto/tegra/tegra-se-key.c | 29 drivers/crypto/tegra/tegra-se-main.c | 16 drivers/crypto/tegra/tegra-se.h | 39 drivers/dma/amd/ae4dma/ae4dma-pci.c | 4 drivers/dma/amd/ae4dma/ae4dma.h | 
2 drivers/dma/amd/ptdma/ptdma-dmaengine.c | 90 drivers/dma/fsl-edma-main.c | 14 drivers/edac/i10nm_base.c | 2 drivers/edac/ie31200_edac.c | 19 drivers/edac/igen6_edac.c | 21 drivers/edac/skx_common.c | 33 drivers/edac/skx_common.h | 11 drivers/firmware/arm_ffa/bus.c | 3 drivers/firmware/arm_ffa/driver.c | 60 drivers/firmware/arm_scmi/driver.c | 10 drivers/firmware/cirrus/cs_dsp.c | 2 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 5 drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 461 ---- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 5 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 2 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 2 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 12 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 15 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 16 drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 4 drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 12 drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 3 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 3 drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 12 drivers/gpu/drm/bridge/ite-it6505.c | 7 drivers/gpu/drm/bridge/ti-sn65dsi86.c | 2 drivers/gpu/drm/display/drm_dp_mst_topology.c | 8 drivers/gpu/drm/drm_file.c | 26 drivers/gpu/drm/mediatek/mtk_crtc.c | 7 drivers/gpu/drm/mediatek/mtk_dp.c | 6 drivers/gpu/drm/mediatek/mtk_dsi.c | 6 drivers/gpu/drm/mediatek/mtk_hdmi.c | 33 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 2 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 4 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 132 - drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 4 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 24 drivers/gpu/drm/msm/dsi/dsi_host.c | 8 drivers/gpu/drm/msm/dsi/dsi_manager.c | 32 drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 2 drivers/gpu/drm/msm/msm_atomic.c | 13 drivers/gpu/drm/msm/msm_dsc_helper.h | 11 drivers/gpu/drm/msm/msm_gem_submit.c | 2 drivers/gpu/drm/msm/msm_kms.h | 7 drivers/gpu/drm/panel/panel-ilitek-ili9882t.c | 2 drivers/gpu/drm/panthor/panthor_device.c | 22 drivers/gpu/drm/panthor/panthor_drv.c | 14 drivers/gpu/drm/panthor/panthor_fw.c | 9 drivers/gpu/drm/panthor/panthor_fw.h | 6 drivers/gpu/drm/panthor/panthor_heap.c | 54 drivers/gpu/drm/panthor/panthor_heap.h | 2 drivers/gpu/drm/panthor/panthor_mmu.c | 27 drivers/gpu/drm/panthor/panthor_mmu.h | 3 drivers/gpu/drm/panthor/panthor_sched.c | 84 drivers/gpu/drm/panthor/panthor_sched.h | 3 drivers/gpu/drm/solomon/ssd130x-spi.c | 7 drivers/gpu/drm/solomon/ssd130x.c | 6 drivers/gpu/drm/vkms/vkms_drv.c | 15 drivers/gpu/drm/xe/Kconfig | 2 drivers/gpu/drm/xlnx/zynqmp_dp.c | 2 drivers/gpu/drm/xlnx/zynqmp_dp_audio.c | 4 drivers/gpu/drm/xlnx/zynqmp_dpsub.c | 2 drivers/greybus/gb-beagleplay.c | 4 drivers/hid/Makefile | 1 drivers/hwtracing/coresight/coresight-catu.c | 2 drivers/hwtracing/coresight/coresight-core.c | 20 drivers/hwtracing/coresight/coresight-etm4x-core.c | 48 drivers/i3c/master/svc-i3c-master.c | 2 drivers/iio/accel/mma8452.c | 10 drivers/iio/accel/msa311.c | 26 drivers/iio/adc/ad4130.c | 41 drivers/iio/adc/ad7124.c | 60 drivers/iio/adc/ad7173.c | 30 drivers/iio/adc/ad7192.c | 5 drivers/iio/adc/ad7768-1.c | 15 drivers/iio/adc/ad_sigma_delta.c | 1 drivers/iio/dac/adi-axi-dac.c | 8 drivers/iio/industrialio-backend.c | 4 drivers/iio/industrialio-gts-helper.c | 11 drivers/iio/light/Kconfig | 1 drivers/iio/light/veml6030.c | 577 ++--- drivers/iio/light/veml6075.c | 8 
drivers/infiniband/core/device.c | 18 drivers/infiniband/core/mad.c | 38 drivers/infiniband/core/sysfs.c | 1 drivers/infiniband/hw/erdma/erdma_cm.c | 1 drivers/infiniband/hw/mana/main.c | 2 drivers/infiniband/hw/mlx5/cq.c | 2 drivers/infiniband/hw/mlx5/mr.c | 41 drivers/infiniband/hw/mlx5/odp.c | 10 drivers/iommu/amd/amd_iommu.h | 7 drivers/iommu/intel/iommu.c | 17 drivers/iommu/io-pgtable-dart.c | 2 drivers/iommu/iommu.c | 5 drivers/leds/led-core.c | 22 drivers/leds/leds-st1202.c | 4 drivers/md/md-bitmap.c | 6 drivers/md/md.c | 71 drivers/md/md.h | 6 drivers/md/raid1-10.c | 2 drivers/md/raid1.c | 17 drivers/md/raid10.c | 19 drivers/media/dvb-frontends/dib8000.c | 5 drivers/media/platform/allegro-dvt/allegro-core.c | 1 drivers/media/platform/ti/omap3isp/isp.c | 7 drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c | 1 drivers/media/rc/streamzap.c | 2 drivers/media/test-drivers/vimc/vimc-streamer.c | 6 drivers/memory/mtk-smi.c | 33 drivers/mfd/sm501.c | 6 drivers/misc/pci_endpoint_test.c | 22 drivers/mmc/host/omap.c | 19 drivers/mmc/host/sdhci-omap.c | 4 drivers/mmc/host/sdhci-pxav3.c | 1 drivers/net/arcnet/com20020-pci.c | 17 drivers/net/bonding/bond_main.c | 8 drivers/net/bonding/bond_options.c | 3 drivers/net/can/rockchip/rockchip_canfd-core.c | 5 drivers/net/dsa/microchip/ksz8.c | 11 drivers/net/dsa/microchip/ksz_dcb.c | 231 -- drivers/net/dsa/mv88e6xxx/chip.c | 32 drivers/net/dsa/mv88e6xxx/phy.c | 3 drivers/net/dsa/sja1105/sja1105_ethtool.c | 9 drivers/net/dsa/sja1105/sja1105_ptp.c | 20 drivers/net/dsa/sja1105/sja1105_static_config.c | 6 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19 drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 3 drivers/net/ethernet/ibm/ibmveth.c | 39 drivers/net/ethernet/ibm/ibmvnic.c | 30 drivers/net/ethernet/intel/e1000e/defines.h | 3 drivers/net/ethernet/intel/e1000e/ich8lan.c | 80 drivers/net/ethernet/intel/e1000e/ich8lan.h | 4 drivers/net/ethernet/intel/ice/devlink/health.c | 6 drivers/net/ethernet/intel/ice/ice_common.c | 3 drivers/net/ethernet/intel/ice/ice_ptp.c | 6 drivers/net/ethernet/intel/ice/ice_virtchnl.c | 39 drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 24 drivers/net/ethernet/intel/idpf/idpf_lib.c | 31 drivers/net/ethernet/intel/idpf/idpf_main.c | 6 drivers/net/ethernet/intel/igb/igb_ptp.c | 6 drivers/net/ethernet/intel/igc/igc.h | 1 drivers/net/ethernet/intel/igc/igc_main.c | 143 + drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c | 4 drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 3 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 201 + drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 2 drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 2 drivers/net/ethernet/mediatek/airoha_eth.c | 20 drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 8 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2 drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 4 drivers/net/ethernet/mellanox/mlx5/core/main.c | 15 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c | 27 drivers/net/ethernet/microchip/lan743x_ptp.c | 6 drivers/net/ethernet/renesas/ravb_ptp.c | 3 drivers/net/ethernet/sfc/ef100_netdev.c | 7 drivers/net/ethernet/sfc/ef100_nic.c | 47 drivers/net/ethernet/sfc/efx.c | 24 drivers/net/ethernet/sfc/mcdi_port.c | 59 drivers/net/ethernet/sfc/mcdi_port_common.c | 11 drivers/net/ethernet/sfc/net_driver.h | 6 drivers/net/ethernet/wangxun/libwx/wx_lib.c | 65 drivers/net/ipvlan/ipvlan_l3s.c | 1 drivers/net/phy/bcm-phy-ptp.c | 3 
drivers/net/phy/broadcom.c | 6 drivers/net/usb/rndis_host.c | 16 drivers/net/usb/usbnet.c | 6 drivers/net/virtio_net.c | 30 drivers/net/vmxnet3/vmxnet3_drv.c | 10 drivers/net/wireless/ath/ath11k/dp_rx.c | 14 drivers/net/wireless/ath/ath11k/mac.c | 5 drivers/net/wireless/ath/ath11k/pci.c | 2 drivers/net/wireless/ath/ath11k/reg.c | 22 drivers/net/wireless/ath/ath12k/core.c | 4 drivers/net/wireless/ath/ath12k/dp_rx.c | 2 drivers/net/wireless/ath/ath12k/dp_tx.c | 2 drivers/net/wireless/ath/ath12k/mac.c | 9 drivers/net/wireless/ath/ath12k/pci.c | 2 drivers/net/wireless/ath/ath12k/wmi.c | 2 drivers/net/wireless/ath/ath9k/common-spectral.c | 4 drivers/net/wireless/marvell/mwifiex/fw.h | 14 drivers/net/wireless/marvell/mwifiex/main.c | 4 drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 18 drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c | 45 drivers/net/wireless/mediatek/mt76/mt7921/main.c | 1 drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 1 drivers/net/wireless/realtek/rtw89/core.h | 2 drivers/net/wireless/realtek/rtw89/fw.c | 12 drivers/net/wireless/realtek/rtw89/pci.h | 56 drivers/net/wireless/realtek/rtw89/pci_be.c | 2 drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c | 13 drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c | 13 drivers/ntb/hw/intel/ntb_hw_gen3.c | 3 drivers/ntb/hw/mscc/ntb_hw_switchtec.c | 2 drivers/ntb/test/ntb_perf.c | 4 drivers/nvdimm/badrange.c | 2 drivers/nvdimm/nd.h | 2 drivers/nvdimm/pfn_devs.c | 7 drivers/nvdimm/pmem.c | 2 drivers/nvme/host/ioctl.c | 2 drivers/nvme/host/pci.c | 3 drivers/nvme/target/debugfs.c | 2 drivers/nvme/target/pci-epf.c | 11 drivers/pci/controller/cadence/pcie-cadence-ep.c | 3 drivers/pci/controller/cadence/pcie-cadence.h | 2 drivers/pci/controller/dwc/pcie-designware-ep.c | 1 drivers/pci/controller/dwc/pcie-histb.c | 12 drivers/pci/controller/pcie-brcmstb.c | 16 drivers/pci/controller/pcie-mediatek-gen3.c | 28 drivers/pci/controller/pcie-xilinx-cpm.c | 10 drivers/pci/endpoint/functions/pci-epf-test.c | 126 - drivers/pci/hotplug/pciehp_hpc.c | 4 drivers/pci/iov.c | 48 drivers/pci/pci-sysfs.c | 4 drivers/pci/pci.c | 22 drivers/pci/pcie/aspm.c | 17 drivers/pci/pcie/bwctrl.c | 6 drivers/pci/pcie/portdrv.c | 8 drivers/pci/probe.c | 5 drivers/pci/setup-bus.c | 39 drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c | 50 drivers/pinctrl/bcm/pinctrl-bcm2835.c | 14 drivers/pinctrl/intel/pinctrl-intel.c | 1 drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c | 10 drivers/pinctrl/renesas/pinctrl-rza2.c | 2 drivers/pinctrl/renesas/pinctrl-rzg2l.c | 3 drivers/pinctrl/renesas/pinctrl-rzv2m.c | 2 drivers/pinctrl/tegra/pinctrl-tegra.c | 3 drivers/platform/x86/dell/dell-uart-backlight.c | 2 drivers/platform/x86/dell/dell-wmi-ddv.c | 6 drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 2 drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c | 2 drivers/platform/x86/thinkpad_acpi.c | 11 drivers/power/supply/bq27xxx_battery.c | 1 drivers/power/supply/max77693_charger.c | 2 drivers/ptp/ptp_ocp.c | 4 drivers/regulator/pca9450-regulator.c | 6 drivers/remoteproc/qcom_q6v5_mss.c | 21 drivers/remoteproc/qcom_q6v5_pas.c | 13 drivers/remoteproc/remoteproc_core.c | 1 drivers/rtc/rtc-renesas-rtca3.c | 15 drivers/scsi/hisi_sas/hisi_sas.h | 3 drivers/scsi/hisi_sas/hisi_sas_main.c | 28 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 4 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 4 drivers/scsi/mpi3mr/mpi3mr_app.c | 1 drivers/scsi/mpt3sas/mpt3sas_base.c | 12 drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2 drivers/soc/mediatek/mt8167-mmsys.h | 13 drivers/soc/mediatek/mt8188-mmsys.h | 2 
drivers/soc/mediatek/mt8365-mmsys.h | 48 drivers/soundwire/generic_bandwidth_allocation.c | 5 drivers/soundwire/slave.c | 1 drivers/spi/spi-amd.c | 2 drivers/spi/spi-bcm2835.c | 18 drivers/spi/spi-cadence-xspi.c | 2 drivers/staging/gpib/agilent_82350b/agilent_82350b.c | 2 drivers/staging/gpib/agilent_82357a/agilent_82357a.c | 424 +--- drivers/staging/gpib/cb7210/cb7210.c | 2 drivers/staging/gpib/hp_82341/hp_82341.c | 2 drivers/staging/gpib/ni_usb/ni_usb_gpib.c | 518 ++-- drivers/staging/rtl8723bs/Kconfig | 1 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 28 drivers/target/loopback/tcm_loop.c | 5 drivers/thermal/intel/int340x_thermal/int3402_thermal.c | 3 drivers/tty/n_tty.c | 13 drivers/tty/serial/fsl_lpuart.c | 312 +- drivers/usb/host/xhci-mem.c | 6 drivers/usb/typec/altmodes/thunderbolt.c | 10 drivers/usb/typec/ucsi/ucsi_ccg.c | 5 drivers/vhost/scsi.c | 25 drivers/video/console/Kconfig | 6 drivers/video/fbdev/au1100fb.c | 4 drivers/video/fbdev/sm501fb.c | 7 drivers/w1/masters/w1-uart.c | 4 fs/9p/vfs_inode_dotl.c | 2 fs/autofs/autofs_i.h | 2 fs/bcachefs/fs-ioctl.c | 6 fs/btrfs/block-group.c | 40 fs/btrfs/disk-io.c | 3 fs/coredump.c | 4 fs/dlm/lockspace.c | 2 fs/erofs/internal.h | 2 fs/erofs/super.c | 8 fs/exec.c | 15 fs/exfat/fatent.c | 2 fs/exfat/file.c | 29 fs/exfat/inode.c | 41 fs/ext4/dir.c | 3 fs/ext4/ext4.h | 17 fs/ext4/ext4_jbd2.h | 29 fs/ext4/inode.c | 19 fs/ext4/mballoc-test.c | 2 fs/ext4/namei.c | 16 fs/ext4/super.c | 81 fs/ext4/xattr.c | 32 fs/ext4/xattr.h | 10 fs/f2fs/checkpoint.c | 15 fs/f2fs/compress.c | 1 fs/f2fs/data.c | 10 fs/f2fs/f2fs.h | 3 fs/f2fs/file.c | 20 fs/f2fs/inode.c | 7 fs/f2fs/namei.c | 8 fs/f2fs/segment.c | 29 fs/f2fs/segment.h | 9 fs/f2fs/super.c | 67 fs/fsopen.c | 2 fs/fuse/dax.c | 1 fs/fuse/dir.c | 2 fs/fuse/file.c | 4 fs/gfs2/super.c | 21 fs/hostfs/hostfs.h | 2 fs/hostfs/hostfs_kern.c | 7 fs/hostfs/hostfs_user.c | 59 fs/isofs/dir.c | 3 fs/jbd2/journal.c | 27 fs/jfs/inode.c | 2 fs/jfs/jfs_dtree.c | 3 fs/jfs/jfs_extent.c | 10 fs/jfs/jfs_imap.c | 13 fs/jfs/xattr.c | 15 fs/nfs/delegation.c | 63 fs/nfs/nfs4xdr.c | 18 fs/nfs/sysfs.c | 22 fs/nfs/write.c | 4 fs/nfsd/Kconfig | 12 fs/nfsd/nfs4callback.c | 14 fs/nfsd/nfs4state.c | 53 fs/nfsd/nfsctl.c | 53 fs/nfsd/stats.c | 4 fs/nfsd/stats.h | 2 fs/nfsd/vfs.c | 28 fs/ntfs3/attrib.c | 3 fs/ntfs3/file.c | 22 fs/ntfs3/frecord.c | 6 fs/ntfs3/index.c | 4 fs/ntfs3/ntfs.h | 2 fs/ntfs3/super.c | 89 fs/ocfs2/alloc.c | 8 fs/proc/base.c | 2 fs/smb/client/connect.c | 16 fs/smb/server/auth.c | 6 fs/smb/server/connection.h | 11 fs/smb/server/mgmt/user_session.c | 37 fs/smb/server/mgmt/user_session.h | 2 fs/smb/server/oplock.c | 12 fs/smb/server/smb2pdu.c | 54 fs/smb/server/smbacl.c | 21 include/asm-generic/rwonce.h | 10 include/drm/display/drm_dp_mst_helper.h | 7 include/drm/drm_file.h | 5 include/linux/arm_ffa.h | 3 include/linux/avf/virtchnl.h | 4 include/linux/badblocks.h | 10 include/linux/context_tracking_irq.h | 8 include/linux/coresight.h | 4 include/linux/cpuset.h | 11 include/linux/dma-direct.h | 13 include/linux/fwnode.h | 2 include/linux/if_bridge.h | 6 include/linux/iio/iio-gts-helper.h | 1 include/linux/iio/iio.h | 26 include/linux/interrupt.h | 8 include/linux/mem_encrypt.h | 23 include/linux/nfs_fs_sb.h | 4 include/linux/nmi.h | 4 include/linux/perf_event.h | 37 include/linux/pgtable.h | 28 include/linux/pm_runtime.h | 2 include/linux/rcupdate.h | 2 include/linux/reboot.h | 18 include/linux/sched.h | 7 include/linux/sched/deadline.h | 4 include/linux/sched/smt.h | 2 include/linux/seccomp.h | 8 
include/linux/thermal.h | 2 include/linux/uprobes.h | 2 include/linux/writeback.h | 24 include/net/ax25.h | 1 include/net/bluetooth/hci.h | 34 include/net/bluetooth/hci_core.h | 5 include/net/bonding.h | 1 include/net/xdp_sock.h | 10 include/net/xdp_sock_drv.h | 1 include/net/xfrm.h | 11 include/rdma/ib_verbs.h | 1 include/trace/define_trace.h | 7 include/trace/events/writeback.h | 21 include/uapi/linux/if_xdp.h | 10 include/uapi/linux/netdev.h | 3 init/Kconfig | 5 io_uring/io-wq.c | 40 io_uring/io-wq.h | 7 io_uring/io_uring.c | 3 io_uring/net.c | 23 kernel/bpf/core.c | 19 kernel/bpf/verifier.c | 7 kernel/cgroup/cpuset.c | 27 kernel/cpu.c | 5 kernel/events/core.c | 39 kernel/events/ring_buffer.c | 2 kernel/events/uprobes.c | 15 kernel/fork.c | 4 kernel/kexec_elf.c | 2 kernel/reboot.c | 84 kernel/rseq.c | 80 kernel/sched/core.c | 8 kernel/sched/deadline.c | 37 kernel/sched/debug.c | 8 kernel/sched/fair.c | 50 kernel/sched/rt.c | 2 kernel/sched/sched.h | 2 kernel/sched/topology.c | 15 kernel/seccomp.c | 14 kernel/trace/bpf_trace.c | 2 kernel/trace/ring_buffer.c | 4 kernel/trace/trace_events.c | 7 kernel/trace/trace_events_synth.c | 36 kernel/trace/trace_functions_graph.c | 1 kernel/trace/trace_irqsoff.c | 2 kernel/trace/trace_osnoise.c | 1 kernel/trace/trace_sched_wakeup.c | 2 kernel/watch_queue.c | 9 kernel/watchdog.c | 25 kernel/watchdog_perf.c | 28 lib/842/842_compress.c | 2 lib/stackinit_kunit.c | 30 lib/vsprintf.c | 2 mm/gup.c | 3 mm/memory.c | 13 mm/page-writeback.c | 37 mm/zswap.c | 30 net/ax25/af_ax25.c | 30 net/ax25/ax25_route.c | 74 net/bluetooth/hci_core.c | 62 net/bluetooth/hci_event.c | 25 net/bluetooth/hci_sync.c | 30 net/bridge/br_ioctl.c | 36 net/bridge/br_private.h | 3 net/core/dev_ioctl.c | 19 net/core/dst.c | 8 net/core/netdev-genl.c | 2 net/core/rtnetlink.c | 3 net/core/rtnl_net_debug.c | 2 net/ipv4/ip_tunnel_core.c | 4 net/ipv4/udp.c | 42 net/ipv6/addrconf.c | 37 net/ipv6/calipso.c | 21 net/ipv6/route.c | 42 net/mac80211/cfg.c | 12 net/mac80211/mlme.c | 9 net/netfilter/nf_tables_api.c | 4 net/netfilter/nf_tables_core.c | 11 net/netfilter/nfnetlink_queue.c | 2 net/netfilter/nft_set_hash.c | 3 net/netfilter/nft_tunnel.c | 6 net/openvswitch/actions.c | 6 net/sched/act_tunnel_key.c | 2 net/sched/cls_flower.c | 2 net/sched/sch_skbprio.c | 3 net/sctp/sysctl.c | 4 net/socket.c | 19 net/vmw_vsock/af_vsock.c | 6 net/wireless/core.c | 6 net/wireless/nl80211.c | 2 net/xdp/xsk.c | 8 net/xfrm/xfrm_device.c | 13 net/xfrm/xfrm_state.c | 32 net/xfrm/xfrm_user.c | 2 rust/Makefile | 4 rust/kernel/print.rs | 7 samples/bpf/Makefile | 2 samples/trace_events/trace-events-sample.h | 8 scripts/gdb/linux/symbols.py | 13 scripts/package/debian/rules | 6 scripts/selinux/install_policy.sh | 15 security/smack/smack.h | 6 security/smack/smack_lsm.c | 34 sound/core/timer.c | 147 - sound/pci/hda/patch_realtek.c | 9 sound/soc/amd/acp/acp-legacy-common.c | 10 sound/soc/codecs/cs35l41-spi.c | 5 sound/soc/codecs/mt6359.c | 9 sound/soc/codecs/rt5665.c | 24 sound/soc/fsl/imx-card.c | 4 sound/soc/generic/simple-card-utils.c | 7 sound/soc/tegra/tegra210_adx.c | 6 sound/soc/ti/j721e-evm.c | 2 sound/usb/mixer_quirks.c | 7 tools/arch/x86/lib/insn.c | 2 tools/bpf/runqslower/Makefile | 3 tools/include/uapi/linux/if_xdp.h | 10 tools/include/uapi/linux/netdev.h | 3 tools/lib/bpf/btf.c | 4 tools/lib/bpf/linker.c | 2 tools/lib/bpf/str_error.c | 2 tools/lib/bpf/str_error.h | 7 tools/objtool/arch/loongarch/decode.c | 28 tools/objtool/arch/loongarch/include/arch/elf.h | 7 tools/objtool/arch/powerpc/decode.c | 14 
tools/objtool/arch/x86/decode.c | 13 tools/objtool/check.c | 84 tools/objtool/elf.c | 6 tools/objtool/include/objtool/arch.h | 3 tools/objtool/include/objtool/elf.h | 27 tools/perf/Makefile.config | 10 tools/perf/Makefile.perf | 2 tools/perf/arch/powerpc/util/header.c | 4 tools/perf/arch/x86/util/topdown.c | 2 tools/perf/bench/syscall.c | 22 tools/perf/builtin-report.c | 32 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json | 10 tools/perf/pmu-events/empty-pmu-events.c | 8 tools/perf/pmu-events/jevents.py | 8 tools/perf/tests/hwmon_pmu.c | 16 tools/perf/tests/pmu.c | 85 tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S | 2 tools/perf/tests/shell/record_bpf_filter.sh | 4 tools/perf/tests/shell/stat_all_pmu.sh | 48 tools/perf/tests/shell/test_data_symbol.sh | 17 tools/perf/tests/tool_pmu.c | 4 tools/perf/tests/workloads/datasym.c | 34 tools/perf/util/arm-spe.c | 8 tools/perf/util/bpf-filter.l | 2 tools/perf/util/comm.c | 2 tools/perf/util/debug.c | 2 tools/perf/util/dso.h | 4 tools/perf/util/evlist.c | 13 tools/perf/util/evsel.c | 16 tools/perf/util/expr.c | 2 tools/perf/util/hwmon_pmu.c | 14 tools/perf/util/hwmon_pmu.h | 16 tools/perf/util/intel-tpebs.c | 2 tools/perf/util/machine.c | 4 tools/perf/util/parse-events.c | 2 tools/perf/util/pmu.c | 263 +- tools/perf/util/pmu.h | 12 tools/perf/util/pmus.c | 171 + tools/perf/util/python.c | 17 tools/perf/util/stat-shadow.c | 3 tools/perf/util/stat.c | 13 tools/perf/util/tool_pmu.c | 34 tools/perf/util/tool_pmu.h | 2 tools/perf/util/units.c | 2 tools/power/x86/turbostat/turbostat.8 | 2 tools/power/x86/turbostat/turbostat.c | 30 tools/testing/selftests/bpf/Makefile | 1 tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c | 5 tools/testing/selftests/bpf/prog_tests/tailcalls.c | 1 tools/testing/selftests/bpf/progs/strncmp_bench.c | 5 tools/testing/selftests/mm/cow.c | 2 tools/testing/selftests/pcie_bwctrl/Makefile | 2 tools/verification/rv/Makefile.rv | 2 748 files changed, 9332 insertions(+), 7044 deletions(-)
Aaron Kling (1): cpufreq: tegra194: Allow building for Tegra234
Acs, Jakub (1): ext4: fix OOB read when checking dotdot dir
Aditya Kumar Singh (1): wifi: nl80211: store chandef on the correct link when starting CAC
Adrián Larumbe (5): drm/panthor: Fix race condition when gathering fdinfo group samples drm/file: Add fdinfo helper for printing regions with prefix drm/panthor: Expose size of driver internal BO's over fdinfo drm/panthor: Replace sleep locks with spinlocks in fdinfo path drm/panthor: Avoid sleep locking in the internal BO size path
Ahmad Fatoum (4): arm64: dts: imx8mp-skov: correct PMIC board limits arm64: dts: imx8mp-skov: operate CPU at 850 mV by default reboot: replace __hw_protection_shutdown bool action parameter with an enum reboot: reboot, not shutdown, on hw_protection_reboot timeout
Akhil R (10):
      crypto: tegra - Use separate buffer for setkey
      crypto: tegra - Do not use fixed size buffers
      crypto: tegra - check return value for hash do_one_req
      crypto: tegra - Transfer HASH init function to crypto engine
      crypto: tegra - Fix HASH intermediate result handling
      crypto: tegra - Use HMAC fallback when keyslots are full
      crypto: tegra - Fix CMAC intermediate result handling
      crypto: tegra - Set IV to NULL explicitly for AES ECB
      crypto: tegra - finalize crypto req on error
      crypto: tegra - Reserve keyslots to allocate dynamically
Akihiko Odaki (1): virtio_net: Fix endian with virtio_net_ctrl_rss
Al Viro (3): spufs: fix a leak on spufs_new_file() failure spufs: fix gang directory lifetimes spufs: fix a leak in spufs_create_context()
Alex Deucher (7):
      drm/amdgpu/umsch: declare umsch firmware
      drm/amdgpu/umsch: fix ucode check
      drm/amdgpu/vcn5.0.1: use correct dpm helper
      drm/amdgpu/mes: optimize compute loop handling
      drm/amdgpu/mes: enable compute pipes across all MEC
      drm/amdgpu/gfx11: fix num_mec
      drm/amdgpu/gfx12: fix num_mec
Alexandre Ghiti (2): riscv: Fix missing __free_pages() in check_vector_unaligned_access() riscv: Fix hugetlb retrieval of number of ptes in case of !present pte
Alexandru Gagniuc (1): kbuild: deb-pkg: don't set KBUILD_BUILD_VERSION unconditionally
Alice Ryhl (1): rust: fix signature of rust_fmt_argument
Alistair Popple (1): fuse: fix dax truncate/punch_hole fault path
Andreas Gruenbacher (2): gfs2: minor evict fix gfs2: skip if we cannot defer delete
Andrew Jones (6):
      riscv: Annotate unaligned access init functions
      riscv: Fix riscv_online_cpu_vec
      riscv: Fix check_unaligned_access_all_cpus
      riscv: Change check_unaligned_access_speed_all_cpus to void
      riscv: Fix set up of cpu hotplug callbacks
      riscv: Fix set up of vector cpu hotplug callback
Andrii Nakryiko (1): libbpf: Fix hypothetical STT_SECTION extern NULL deref case
Andy Shevchenko (3): auxdisplay: panel: Fix an API misuse in panel.c pinctrl: npcm8xx: Fix incorrect struct npcm8xx_pincfg assignment pinctrl: intel: Fix wrong bypass assignment in intel_pinctrl_probe_pwm()
Angelo Dureghello (1): iio: dac: adi-axi-dac: modify stream enable
AngeloGioacchino Del Regno (5): soc: mediatek: mtk-mmsys: Fix MT8188 VDO1 DPI1 output selection soc: mediatek: mt8167-mmsys: Fix missing regval in all entries soc: mediatek: mt8365-mmsys: Fix routing table masks and values drm/mediatek: mtk_hdmi: Unregister audio platform device on failure drm/mediatek: mtk_hdmi: Fix typo for aud_sampe_size member
Angelos Oikonomopoulos (1): arm64: Don't call NULL in do_compat_alignment_fixup()
Anshuman Khandual (1): arch/powerpc: drop GENERIC_PTDUMP from mpc885_ads_defconfig
Antoine Tenart (1): net: decrease cached dst counters in dst_release
Antonio Quartulli (1): scripts/gdb/linux/symbols.py: address changes to module_sect_attrs
Anuj Gupta (2): block: ensure correct integrity capability propagation in stacked devices block: Correctly initialize BLK_INTEGRITY_NOGENERATE and BLK_INTEGRITY_NOVERIFY
Armin Wolf (1): platform/x86: dell-ddv: Fix temperature calculation
Arnaldo Carvalho de Melo (5): perf units: Fix insufficient array space perf python: Fixup description of sample.id event member perf python: Decrement the refcount of just created event on failure perf python: Don't keep a raw_data pointer to consumed ring buffer space perf python: Check if there is space to copy all the event
Arnd Bergmann (6):
      x86/platform: Only allow CONFIG_EISA for 32-bit
      firmware: arm_scmi: use ioread64() instead of ioread64_hi_lo()
      dummycon: fix default rows/cols
      mdacon: rework dependency list
      crypto: bpf - Add MODULE_DESCRIPTION for skcipher
      x86/Kconfig: Add cmpxchg8b support back to Geode CPUs
Artur Weber (1): power: supply: max77693: Fix wrong conversion of charge input threshold value
Asahi Lina (1): iommu/io-pgtable-dart: Only set subpage protection disable for DART 1
Ashley Smith (1): drm/panthor: Update CS_STATUS_ defines to correct values
Atish Patra (2): RISC-V: KVM: Disable the kernel perf counter during configure RISC-V: KVM: Teardown riscv specific bits after kvm_exit
Aurabindo Pillai (1): drm/amd/display: fix an indent issue in DML21
Bairavi Alagappan (2): crypto: qat - set parity error mask for qat_420xx crypto: qat - remove access to parity register for QAT GEN4
Baochen Qiang (1): wifi: ath12k: use link specific bss_conf as well in ath12k_mac_vif_cache_flush()
Baokun Li (5): ext4: convert EXT4_FLAGS_* defines to enum ext4: add EXT4_FLAGS_EMERGENCY_RO bit ext4: correct behavior under errors=remount-ro mode ext4: show 'emergency_ro' when EXT4_FLAGS_EMERGENCY_RO is set ext4: goto right label 'out_mmap_sem' in ext4_setattr()
Bard Liao (1): soundwire: take in count the bandwidth of a prepared stream
Barnabás Czémán (1): clk: qcom: mmcc-sdm660: fix stuck video_subcore0 clock
Bart Van Assche (5): wifi: ath12k: Fix locking in "QMI firmware ready" error paths scsi: mpi3mr: Fix locking in an error path scsi: mpt3sas: Fix a locking bug in an error path drm: zynqmp_dp: Fix a deadlock in zynqmp_dp_ignore_hpd_set() fs/procfs: fix the comment above proc_pid_wchan()
Bartosz Golaszewski (1): pinctrl: bcm2835: don't -EINVAL on alternate funcs from get_direction()
Basavaraj Natikar (2): dmaengine: ae4dma: Use the MSI count and its corresponding IRQ number dmaengine: ptdma: Utilize the AE4DMA engine's multi-queue functionality
Benjamin Berg (3): x86/fpu: Avoid copying dynamic FP state from init_task in arch_dup_task_struct() um: remove copy_from_kernel_nofault_allowed um: hostfs: avoid issues on inode number reuse by host
Benjamin Gaignard (1): media: verisilicon: HEVC: Initialize start_bit field
Benson Leung (2): usb: typec: thunderbolt: Fix loops that iterate TYPEC_PLUG_SOP_P and TYPEC_PLUG_SOP_PP usb: typec: thunderbolt: Remove IS_ERR check for plug
Björn Töpel (1): riscv/purgatory: 4B align purgatory_start
Boris Brezillon (1): drm/panthor: Fix a race between the reset and suspend path
Boris Burkov (1): btrfs: fix block group refcount race in btrfs_create_pending_block_groups()
Boris Ostrovsky (1): x86/microcode/AMD: Fix __apply_microcode_amd()'s return value
Caleb Sander Mateos (3): io_uring/net: only import send_zc buffer once io_uring: use lockless_cq flag in io_req_complete_post() nvme/ioctl: don't warn on vectorized uring_cmd with fixed buffer
Candice Li (1): Remove unnecessary firmware version check for gc v9_4_2
Chao Gao (1): x86/fpu/xstate: Fix inconsistencies in guest FPU xfeatures
Chao Yu (7):
      f2fs: quota: fix to avoid warning in dquot_writeback_dquots()
      f2fs: fix to avoid panic once fallocation fails for pinfile
      f2fs: fix to set .discard_granularity correctly
      f2fs: fix potential deadloop in prepare_compress_overwrite()
      f2fs: fix to call f2fs_recover_quota_end() correctly
      f2fs: fix to avoid accessing uninitialized curseg
      f2fs: fix to avoid running out of free segments
Charles Han (3): ext4: fix potential null dereference in ext4 kunit test drm: xlnx: zynqmp_dpsub: Add NULL check in zynqmp_audio_init clk: mmp: Fix NULL vs IS_ERR() check
Chen-Yu Tsai (3): arm64: dts: mediatek: mt8173-elm: Drop pmic's #address-cells and #size-cells arm64: dts: mediatek: mt8173: Fix some node names arm64: dts: rockchip: Remove bluetooth node from rock-3a
Cheng Xu (1): RDMA/erdma: Prevent use-after-free in erdma_accept_newconn()
Chenyuan Yang (3): thermal: int340x: Add NULL check for adev netfilter: nfnetlink_queue: Initialize ctx to avoid memory allocation error w1: fix NULL pointer dereference in probe
Chiara Meiohas (1): RDMA/mlx5: Fix calculation of total invalidated pages
Christian Brauner (1): fs: support O_PATH fds with FSCONFIG_SET_FD
Christian Eggers (1): ARM: 9444/1: add KEEP() keyword to ARM_VECTORS
Christian Schoenebeck (1): fs/9p: fix NULL pointer dereference on mkdir
Christophe JAILLET (4): bus: qcom-ssc-block-bus: Remove some duplicated iounmap() calls bus: qcom-ssc-block-bus: Fix the error handling path of qcom_ssc_block_bus_probe() PCI: histb: Fix an error handling path in histb_pcie_probe() ASoC: codecs: rt5665: Fix some error handling paths in rt5665_probe()
Christophe Leroy (2): crypto: powerpc: Mark ghashp8-ppc.o as an OBJECT_FILES_NON_STANDARD powerpc/kexec: fix physical address calculation in clear_utlb_entry()
Chuck Lever (5): NFSD: Fix callback decoder status codes NFSD: Add a Kconfig setting to enable delegated timestamps NFSD: nfsd_unlink() clobbers non-zero status returned from fh_fill_pre_attrs() NFSD: Never return NFS4ERR_FILE_OPEN when removing a directory NFSD: Skip sending CB_RECALL_ANY when the backchannel isn't up
Chukun Pan (1): arm64: dts: rockchip: Move rk356x scmi SHMEM to reserved memory
Chunhai Guo (1): f2fs: fix missing discard for active segments
Claudiu Beznea (3): pinctrl: renesas: rzg2l: Suppress binding attributes clk: renesas: r8a08g045: Check the source of the CPU PLL settings rtc: renesas-rtca3: Disable interrupts only if the RTC is enabled
Cong Wang (1): net_sched: skbprio: Remove overly strict queue assertions
Cyan Yang (1): selftests/mm/cow: fix the incorrect error handling
Dan Carpenter (7):
      drm/msm/gem: Fix error code msm_parse_deps()
      PCI: Remove stray put_device() in pci_register_host_bridge()
      drm/mediatek: dsi: fix error codes in mtk_dsi_host_transfer()
      PCI: dwc: ep: Return -ENOMEM for allocation failures
      fs/ntfs3: Fix a couple integer overflows on 32bit systems
      fs/ntfs3: Prevent integer overflow in hdr_first_de()
      nfs: Add missing release on error in nfs_lock_and_join_requests()
Daniel Stodden (1): PCI/ASPM: Fix link state exit during switch upstream function removal
Danila Chernetsov (1): fbdev: sm501fb: Add some geometry checks.
Dapeng Mi (1): perf x86/topdown: Fix topdown leader sampling test error on hybrid
Dario Binacchi (1): clk: stm32f4: fix an uninitialized variable
Dave Marquardt (1): net: ibmveth: make veth_pool_store stop hanging
Dave Penkler (7):
      staging: gpib: Add missing interface entry point
      staging: gpib: Fix pr_err format warning
      staging: gpib: Fix cb7210 pcmcia Oops
      staging: gpib: ni_usb console messaging cleanup
      staging: gpib: Fix Oops after disconnect in ni_usb
      staging: gpib: agilent usb console messaging cleanup
      staging: gpib: Fix Oops after disconnect in agilent usb
David Gow (1): um: Pass the correct Rust target and options with gcc
David Hildenbrand (3): x86/mm/pat: Fix VM_PAT handling when fork() fails in copy_page_range() kernel/events/uprobes: handle device-exclusive entries correctly in __replace_page() mm/gup: reject FOLL_SPLIT_PMD with hugetlb VMAs
David Laight (1): objtool: Fix verbose disassembly if CROSS_COMPILE isn't set
David Oberhollenzer (1): net: dsa: mv88e6xxx: propperly shutdown PPU re-enable timer on destroy
Debin Zhu (1): netlabel: Fix NULL pointer exception caused by CALIPSO on IPv4 sockets
Dhananjay Ugwekar (4): cpufreq/amd-pstate: Modify the min_perf calculation in adjust_perf callback cpufreq/amd-pstate: Pass min/max_limit_perf as min/max_perf to amd_pstate_update cpufreq/amd-pstate: Convert all perf values to u8 cpufreq/amd-pstate: Add missing NULL ptr check in amd_pstate_update
Dmitry Antipov (3): wifi: ath9k: do not submit zero bytes to the entropy pool jfs: reject on-disk inodes of an unsupported type wifi: rtw89: rtw8852b{t}: fix TSSI debug timestamps
Dmitry Baryshkov (4): drm/msm/dpu: don't use active in atomic_check() drm/msm/dpu: move needs_cdm setting to dpu_encoder_get_topology() drm/msm/dpu: simplify dpu_encoder_get_topology() interface drm/msm/dpu: don't set crtc_state->mode_changed from atomic_check()
Dmitry Vyukov (1): perf report: Fix input reload/switch with symbol sort key
Douglas Anderson (1): drm/mediatek: dp: drm_err => dev_err in HPD path to avoid NULL ptr
Douglas Raillard (2): tracing: Ensure module defining synth event cannot be unloaded while tracing tracing: Fix synth event printk format for str fields
Eduard Christian Dumitrescu (1): platform/x86: thinkpad_acpi: disable ACPI fan access for T495* and E560
Edward Adam Davis (1): wifi: cfg80211: init wiphy_work before allocating rfkill fails
Edward Cree (2): sfc: rip out MDIO support sfc: fix NULL dereferences in ef100_process_design_param()
Emil Tantilov (2): idpf: check error for register_netdev() on init idpf: fix adapter NULL pointer dereference on reboot
Eric Dumazet (1): sctp: add mutual exclusion in proc_sctp_do_udp_port()
Eric Sandeen (1): watch_queue: fix pipe accounting mismatch
Fabrizio Castro (3): pinctrl: renesas: rza2: Fix missing of_node_put() call pinctrl: renesas: rzg2l: Fix missing of_node_put() call pinctrl: renesas: rzv2m: Fix missing of_node_put() call
Feng Tang (1): PCI/portdrv: Only disable pciehp interrupts early when needed
Feng Yang (1): ring-buffer: Fix bytes_dropped calculation issue
Fernando Fernandez Mancera (1): ipv6: fix omitted netlink attributes when using RTEXT_FILTER_SKIP_STATS
Filipe Manana (2): btrfs: get used bytes while holding lock at btrfs_reclaim_bgs_work() btrfs: fix reclaimed bytes accounting after automatic block group reclaim
Florian Fainelli (2): spi: bcm2835: Do not call gpiod_put() on invalid descriptor spi: bcm2835: Restore native CS probing when pinctrl-bcm2835 is absent
Florian Westphal (1): netfilter: nf_tables: don't unregister hook when table is dormant
Francesco Dolcini (1): arm64: dts: ti: k3-am62p: Enable AUDIO_REFCLKx
Frieder Schrempf (1): regulator: pca9450: Fix enable register for LDO5
Gabriele Monaco (1): tracing: Fix DECLARE_TRACE_CONDITION
Gao Xiang (1): erofs: allow 16-byte volume name again
Geert Uytterhoeven (5): m68k: sun3: Fix DEBUG_MMU_EMU build auxdisplay: MAX6959 should select BITREVERSE arm64: dts: renesas: r8a774c0: Re-add voltages to OPP table arm64: dts: renesas: r8a77990: Re-add voltages to OPP table drm/bridge: ti-sn65dsi86: Fix multiple instances
Geetha sowjanya (2): octeontx2-af: Fix mbox INTR handler when num VFs > 64 octeontx2-af: Free NIX_AF_INT_VEC_GEN irq
Gergo Koteles (1): ACPI: video: Handle fetching EDID as ACPI_TYPE_PACKAGE
Giovanni Gherdovich (1): ACPI: processor: idle: Return an error if both P_LVL{2,3} idle states are invalid
Greg Kroah-Hartman (1): Linux 6.14.2
Guilherme G. Piccoli (1): x86/tsc: Always save/restore TSC sched_clock() on suspend/resume
Guillaume Nault (1): tunnels: Accept PACKET_HOST in skb_tunnel_check_pmtu().
Guixin Liu (1): scsi: target: tcm_loop: Fix wrong abort tag
Hans Zhang (1): PCI: cadence-ep: Fix the driver to send MSG TLP for INTx without data payload
Hans de Goede (1): ACPI: x86: Extend Lenovo Yoga Tab 3 quirk with skip GPIO event-handlers
Heiko Stuebner (1): phy: phy-rockchip-samsung-hdptx: Don't use dt aliases to determine phy-id
Hengqi Chen (3): LoongArch: BPF: Fix off-by-one error in build_prologue() LoongArch: BPF: Don't override subprog's return value LoongArch: BPF: Use move_addr() for BPF_PSEUDO_FUNC
Henry Martin (2): ASoC: imx-card: Add NULL check in imx_card_probe() arcnet: Add NULL check in com20020pci_probe()
Herbert Xu (4): crypto: iaa - Test the correct request flag crypto: api - Fix larval relookup type and mask crypto: api - Call crypto_alg_put in crypto_unregister_alg crypto: nx - Fix uninitialised hv_nxc on error
Hermes Wu (1): drm/bridge: it6505: fix HDCP V match check is not performed correctly
Herton R. Krzesinski (1): x86/uaccess: Improve performance by aligning writes to 8 bytes in copy_user_generic(), on non-FSRM/ERMS CPUs
Hou Tao (1): bpf: Use preempt_count() directly in bpf_send_signal_common()
Hrushikesh Salunke (1): arm64: dts: ti: k3-j722s-evm: Fix USB2.0_MUX_SEL to select Type-C
Huacai Chen (2): LoongArch: Increase ARCH_DMA_MINALIGN up to 16 LoongArch: Increase MAX_IO_PICS up to 8
Ian Rogers (10):
      libbpf: Add namespace for errstr making it libbpf_errstr
      perf stat: Fix find_stat for mixed legacy/non-legacy events
      perf stat: Don't merge counters purely on name
      perf pmus: Restructure pmu_read_sysfs to scan fewer PMUs
      tools/x86: Fix linux/unaligned.h include path in lib/insn.c
      perf tests: Fix data symbol test with LTO builds
      perf debug: Avoid stack overflow in recursive error message
      perf evlist: Add success path to evlist__create_syswide_maps
      perf evsel: tp_format accessing improvements
      perf pmu: Rename name matching for no suffix or wildcard variants
Ido Schimmel (2): ipv6: Start path selection from the first nexthop ipv6: Do not consider link down nexthops in path selection
Ilkka Koskinen (2): coresight: catu: Fix number of pages while using 64k pages perf vendor events arm64 AmpereOneX: Fix frontend_bound calculation
Ilpo Järvinen (8):
      platform/x86: lenovo-yoga-tab2-pro-1380-fastcharger: Make symbol static
      platform/x86: dell-uart-backlight: Make dell_uart_bl_serdev_driver static
      PCI: Remove add_align overwrite unrelated to size0
      PCI: Simplify size1 assignment logic
      PCI: Allow relaxed bridge window tail sizing for optional resources
      PCI: Fix BAR resizing when VF BARs are assigned
      PCI: pciehp: Don't enable HPIE when resuming in poll mode
      PCI/bwctrl: Fix pcie_bwctrl_select_speed() return type
Jacky Bai (2): cpuidle: Init cpuidle only for present CPUs cpufreq: Init cpufreq only for present CPUs
Jacob Keller (5): igb: reject invalid external timestamp requests for 82580-based HW renesas: reject PTP_STRICT_FLAGS as unsupported net: lan743x: reject unsupported external timestamp requests broadcom: fix supported flag check in periodic output function ptp: ocp: reject unsupported periodic output flags
Jakub Kicinski (1): netlink: specs: rt_route: pull the ifa- prefix out of the names
James Clark (5): perf: Always feature test reallocarray perf tests: Fix Tool PMU test segfault perf pmu: Dynamically allocate tool PMU perf pmu: Don't double count common sysfs and json events perf: intel-tpebs: Fix incorrect usage of zfree()
James Morse (1): x86/resctrl: Fix allocation of cleanest CLOSID on platforms with no monitors
Jan Glaza (3): virtchnl: make proto and filter action count unsigned ice: stop truncating queue ids when checking ice: validate queue quanta parameters to prevent OOB access
Jan Kara (1): ext4: verify fast symlink length
Jann Horn (5): rwonce: handle KCSAN like KASAN in read_word_at_a_time() rwonce: fix crash by removing READ_ONCE() for unaligned read x86/entry: Fix ORC unwinder for PUSH_REGS with save_ret=1 x86/dumpstack: Fix inaccurate unwinding from exception stacks due to misplaced assignment x86/mm: Fix flush_tlb_range() when used for zapping normal PMDs
Jason-JH Lin (1): drm/mediatek: Fix config_updating flag never false when no mbox channel
Javier Carrasco (3): iio: light: veml6030: extend regmap to support regfields iio: gts-helper: export iio_gts_get_total_gain() iio: light: veml6030: fix scale to conform to ABI
Javier Martinez Canillas (1): drm/ssd130x: Set SPI .id_table to prevent an SPI core warning
Jayesh Choudhary (1): ASoC: ti: j721e-evm: Fix clock configuration for ti,j7200-cpb-audio compatible
Jeff Chen (2): wifi: mwifiex: Fix premature release of RF calibration data. wifi: mwifiex: Fix RF calibration data download from file
Jeff Layton (2): nfsd: don't ignore the return code of svc_proc_register() nfsd: allow SC_STATUS_FREEABLE when searching via nfs4_lookup_stateid()
Jens Axboe (1): io_uring/net: improve recv bundles
Jerome Brunet (4): clk: amlogic: gxbb: drop incorrect flag on 32k clock clk: amlogic: g12b: fix cluster A parent data clk: amlogic: gxbb: drop non existing 32k clock parent clk: amlogic: g12a: fix mmc A peripheral clock
Jesse Brandeburg (1): ice: fix reservation of resources for RDMA when disabled
Jianfeng Liu (1): arm64: dts: rockchip: Fix pcie reset gpio on Orange Pi 5 Max
Jiawen Wu (2): net: libwx: fix Tx descriptor content for some tunnel packets net: libwx: fix Tx L4 checksum
Jiayuan Chen (1): bpf: Fix array bounds error with may_goto
Jie Zhan (1): cpufreq: governor: Fix negative 'idle_time' handling in dbs_update()
Jim Liu (1): net: phy: broadcom: Correct BCM5221 PHY model detection
Jim Quinlan (4): PCI: brcmstb: Set generation limit before PCIe link up PCI: brcmstb: Use internal register to change link capability PCI: brcmstb: Fix error path after a call to regulator_bulk_get() PCI: brcmstb: Fix potential premature regulator disabling
Jinghao Jia (1): samples/bpf: Fix broken vmlinux path for VMLINUX_BTF
Jiri Kosina (1): HID: remove superfluous (and wrong) Makefile entry for CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER
Jiri Olsa (1): uprobes/x86: Harden uretprobe syscall trampoline check
Jiri Slaby (SUSE) (1): tty: n_tty: use uint for space returned by tty_write_room()
Joe Hattori (2): media: platform: allgro-dvt: unregister v4l2_device on the error path soundwire: slave: fix an OF node reference leak in soundwire slave device
Johannes Berg (1): wifi: mac80211: remove SSID from ML reconf
John Keeping (3): drm/ssd130x: fix ssd132x encoding drm/ssd130x: ensure ssd132x pitch is correct drm/panel: ilitek-ili9882t: fix GPIO name in error message
Jonathan Cameron (3): iio: accel: mma8452: Ensure error return on failure to matching oversampling ratio iio: accel: msa311: Fix failure to release runtime pm if direct mode claim fails. iio: core: Rework claim and release of direct mode to work with sparse.
Jonathan Santos (1): iio: adc: ad7768-1: set MOSI idle state to prevent accidental reset
Josh Poimboeuf (11):
      x86/traps: Make exc_double_fault() consistently noreturn
      objtool: Fix detection of consecutive jump tables on Clang 20
      objtool, spi: amd: Fix out-of-bounds stack access in amd_set_spi_freq()
      objtool, nvmet: Fix out-of-bounds stack access in nvmet_ctrl_state_show()
      objtool, media: dib8000: Prevent divide-by-zero in dib8000_set_dds()
      objtool: Fix segfault in ignore_unreachable_insn()
      sched/smt: Always inline sched_smt_active()
      context_tracking: Always inline ct_{nmi,irq}_{enter,exit}()
      rcu-tasks: Always inline rcu_irq_work_resched()
      objtool/loongarch: Add unwind hints in prepare_frametrace()
      spi: cadence: Fix out-of-bounds array access in cdns_mrvl_xspi_setup_clock()
José Expósito (1): drm/vkms: Fix use after free and double free on init error
Juhan Jin (1): riscv: ftrace: Add parentheses in macro definitions of make_call_t0 and make_call_ra
Juri Lelli (5): sched/deadline: Ignore special tasks when rebuilding domains sched/topology: Wrappers for sched_domains_mutex sched/deadline: Generalize unique visiting of root domains sched/deadline: Rebuild root domain accounting after every update include/{topology,cpuset}: Move dl_rebuild_rd_accounting to cpuset.h
Kai-Heng Feng (1): PCI: Use downstream bridges for distributing resources
Kan Liang (5): perf: Save PMU specific data in task_struct perf: Supply task information to sched_task() perf/x86/lbr: Fix shorter LBRs call stacks for the system-wide mode perf tools: Add skip check in tool_pmu__event_to_str() perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read
Kang Yang (1): wifi: ath11k: add srng->lock for ath11k_hal_srng_* in monitor mode
Karan Sanghavi (1): iio: light: Add check for array bounds in veml6075_read_int_time_ms
Karel Balej (1): mmc: sdhci-pxav3: set NEED_RSP_BUSY capability
Karol Kolacinski (1): ice: ensure periodic output start time is in the future
Kees Bakker (1): RDMA/mana_ib: Ensure variable err is initialized
Kees Cook (1): kunit/stackinit: Use fill byte different from Clang i386 pattern
Kemeng Shi (1): ext4: add missing brelse() for bh2 in ext4_dx_add_entry()
Kent Overstreet (1): bcachefs: bch2_ioctl_subvolume_destroy() fixes
Kevin Loughlin (1): x86/sev: Add missing RIP_REL_REF() invocations during sme_enable()
Kirill A. Shutemov (1): x86/paravirt: Move halt paravirt calls under CONFIG_PARAVIRT
Konrad Dybcio (1): clk: qcom: gcc-x1e80100: Unregister GCC_GPU_CFG_AHB_CLK/GCC_DISP_XO_CLK
Konstantin Andreev (2): smack: dont compile ipv6 code unless ipv6 is configured smack: ipv4/ipv6: tcp/dccp/sctp: fix incorrect child socket label
Konstantin Komarov (1): fs/ntfs3: Update inode->i_mapping->a_ops on compression state
Krzysztof Kozlowski (1): drm/msm/dsi/phy: Program clock inverters in correct register
Kuninori Morimoto (1): ASoC: simple-card-utils: Don't use __free(device_node) at graph_util_parse_dai()
Kuniyuki Iwashima (4): net: Remove RTNL dance for SIOCBRADDIF and SIOCBRDELIF. rtnetlink: Use register_pernet_subsys() in rtnl_net_debug_init(). udp: Fix multiple wraparounds of sk->sk_rmem_alloc. udp: Fix memory accounting leak.
Lama Kayal (1): net/mlx5e: SHAMPO, Make reserved size independent of page size
Laurentiu Mihalcea (3): arm64: dts: imx8mp: add AUDIO_AXI_CLK_ROOT to AUDIOMIX block arm64: dts: imx8mp: change AUDIO_AXI_CLK_ROOT freq. to 800MHz clk: clk-imx8mp-audiomix: fix dsp/ocram_a clock parents
Len Brown (1): tools/power turbostat: report CoreThr per measurement interval
Leo Stone (1): f2fs: add check for deleted inode
Leo Yan (1): perf arm-spe: Fix load-store operation checking
Leon Romanovsky (1): xfrm: delay initialization of offload path till its actually requested
Li Huafei (1): watchdog/hardlockup/perf: Fix perf_event memory leak
Li Lingfeng (1): nfsd: put dl_stid if fail to queue dl_recall
Li Nan (8):
      md: ensure resync is prioritized over recovery
      badblocks: Fix error shitf ops
      badblocks: factor out a helper try_adjacent_combine
      badblocks: attempt to merge adjacent badblocks during ack_all_badblocks
      badblocks: return error directly when setting badblocks exceeds 512
      badblocks: return error if any badblock set fails
      badblocks: fix the using of MAX_BADBLOCKS
      badblocks: fix merge issue when new badblocks align with pre+1
Liang Jie (1): wifi: rtw89: Correct immediate cfg_len calculation for scan_offload_be
Likhitha Korrapati (1): perf tools: Fix is_compat_mode build break in ppc64
Lin Ma (2): netfilter: nft_tunnel: fix geneve_opt type confusion addition net: fix geneve_opt length integer overflow
Lizhi Hou (1): accel/amdxdna: Return error when setting clock failed for npu1
Lorenzo Bianconi (4): net: airoha: Fix lan4 support in airoha_qdma_get_gdm_port() PCI: mediatek-gen3: Configure PBUS_CSR registers for EN7581 SoC net: airoha: Fix qid report in airoha_tc_get_htb_get_leaf_queue() net: airoha: Fix ETS priomap validation
Louis-Alexis Eyraud (3): arm64: dts: mediatek: mt8390-genio-700-evk: Move common parts to dtsi arm64: dts: mediatek: mt8390-genio-common: Fix duplicated regulator name ASoC: mediatek: mt6359: Fix DT parse error due to wrong child node name
Lubomir Rintel (1): rndis_host: Flag RNDIS modems as WWAN devices
Luca Ceresoli (1): perf build: Fix in-tree build due to symbolic link
Luca Weiss (4): remoteproc: qcom_q6v5_pas: Make single-PD handling more robust remoteproc: qcom: pas: add minidump_id to SC7280 WPSS remoteproc: qcom_q6v5_pas: Use resource with CX PD for MSM8226 remoteproc: qcom_q6v5_mss: Handle platforms with one power domain
Luiz Augusto von Dentz (2): Bluetooth: hci_core: Enable buffer flow control for SCO/eSCO Bluetooth: hci_event: Fix handling of HCI_EV_LE_DIRECT_ADV_REPORT
Lukas Wunner (1): PCI/bwctrl: Fix NULL pointer dereference on bus number exhaustion
Lukasz Czapnik (1): ice: fix input validation for virtchnl BW
Macpaul Lin (1): arm64: dts: mediatek: mt6359: fix dtbs_check error for audio-codec
Maher Sanalla (1): IB/mad: Check available slots before posting receive WRs
Maksim Davydov (1): x86/split_lock: Fix the delayed detection logic
Manikanta Mylavarapu (2): drivers: clk: qcom: ipq5424: fix the freq table of sdcc1_apps clock clk: qcom: ipq5424: fix software and hardware flow control error of UART
Manivannan Sadhasivam (2): wifi: ath11k: Clear affinity hint before calling ath11k_pcic_free_irq() in error path wifi: ath12k: Clear affinity hint before calling ath12k_pci_free_irq() in error path
Marcus Meissner (1): perf tools: annotate asm_pure_loop.S
Marek Behún (5): net: dsa: mv88e6xxx: fix atu_move_port_mask for 6341 family net: dsa: mv88e6xxx: enable PVT for 6321 switch net: dsa: mv88e6xxx: enable .port_set_policy() for 6320 family net: dsa: mv88e6xxx: fix VTU methods for 6320 family net: dsa: mv88e6xxx: enable STU methods for 6320 family
Marijn Suijten (4): drm/msm/dsi: Use existing per-interface slice count in DSC timing drm/msm/dsi: Set PHY usescase (and mode) before registering DSI host drm/msm/dpu: Fall back to a single DSC encoder (1:1:1) on small SoCs drm/msm/dpu: Remove arbitrary limit of 1 interface in DSC topology
Mario Limonciello (1): ucsi_ccg: Don't show failed to get FW build information error
Mark Bloch (1): net/mlx5: LAG, reload representors on LAG creation failure
Mark Harmstone (1): btrfs: don't clobber ret in btrfs_validate_super()
Mark Zhang (1): rtnetlink: Allocate vfinfo size for VF GUIDs when supported
Markus Elfring (2): fbdev: au1100fb: Move a variable assignment behind a null pointer check ntb_perf: Delete duplicate dmaengine_unmap_put() call in perf_copy_chunk()
Mateusz Polchlopek (1): ice: fix using untrusted value of pkt_len in ice_vc_fdir_parse_raw()
Maud Spierings (1): dt-bindings: vendor-prefixes: add GOcontroll
Maurizio Lombardi (1): nvme-pci: skip nvme_write_sq_db on empty rqlist
Max Kellermann (3): io_uring/io-wq: eliminate redundant io_work_get_acct() calls io_uring/io-wq: cache work->flags in variable io_uring/io-wq: do not use bogus hash value
Max Merchel (1): ARM: dts: imx6ul-tqma6ul1: Change include order to disable fec2 node
Maxim Mikityanskiy (1): net/mlx5e: Fix ethtool -N flow-type ip4 to RSS context
Miaoqian Lin (3):
      ksmbd: use aead_request_free to match aead_request_alloc
      LoongArch: Fix device node refcount leak in fdt_cpu_clk_init()
      mmc: omap: Fix memory leak in mmc_omap_new_slot
Michael Chan (2):
      bnxt_en: Mask the bd_cnt field in the TX BD properly
      bnxt_en: Linearize TX SKB if the fragments exceed the max
Michael Guralnik (2):
      RDMA/mlx5: Fix page_size variable overflow
      RDMA/mlx5: Fix MR cache initialization error flow
Michael Jeanson (1): rseq: Update kernel fields in lockstep with CONFIG_DEBUG_RSEQ=y
Michael Walle (2):
      arm64: dts: ti: k3-am62p: fix pinctrl settings
      arm64: dts: ti: k3-j722s: fix pinctrl settings
Mike Christie (1): vhost-scsi: Fix handling of multiple calls to vhost_scsi_set_endpoint
Mike Rapoport (Microsoft) (1): x86/mm/pat: cpa-test: fix length for CPA_ARRAY test
Mikhail Lobanov (1): wifi: mac80211: check basic rates validity in sta_link_apply_parameters
Ming Lei (2):
      block: fix adding folio to bio
      ublk: make sure ubq->canceling is set when queue is frozen
Ming Yen Hsieh (2):
      wifi: mt76: mt7925: remove unused acpi function for clc
      wifi: mt76: mt7921: fix kernel panic due to null pointer dereference
Moshe Shemesh (1): net/mlx5: Start health poll after enable hca
Murad Masimov (3):
      ax25: Remove broken autobind
      acpi: nfit: fix narrowing conversion in acpi_nfit_ctl
      media: streamzap: fix race between device disconnection and urb callback
Namhyung Kim (4):
      perf report: Switch data file correctly in TUI
      perf machine: Fixup kernel maps ends after adding extra maps
      perf test: Add timeout to datasym workload
      perf bpf-filter: Fix a parsing error with comma
Namjae Jeon (6):
      ksmbd: fix multichannel connection failure
      ksmbd: fix r_count dec/increment mismatch
      ksmbd: add bounds check for durable handle context
      ksmbd: fix use-after-free in ksmbd_sessions_deregister()
      ksmbd: fix session use-after-free in multichannel connection
      ksmbd: fix null pointer dereference in alloc_preauth_hash()
Nathan Chancellor (3):
      crypto: tegra - Fix format specifier in tegra_sha_prep_cmd()
      ACPI: platform-profile: Fix CFI violation when accessing sysfs files
      ARM: 9443/1: Require linker to support KEEP within OVERLAY for DCE
Neeraj Sanjay Kale (1): Bluetooth: btnxpuart: Fix kernel panic during FW release
Neil Armstrong (1): clk: qcom: gcc-sm8650: Do not turn off USB GDSCs during gdsc_disable()
NeilBrown (1): NFS: fix open_owner_id_maxsz and related fields.
Nick Child (1): ibmvnic: Use kernel helpers for hex dumps
Nicolas Bouchinet (1): coredump: Fixes core_pipe_limit sysctl proc_handler
Nicolas Escande (2):
      wifi: ath12k: fix skb_ext_desc leak in ath12k_dp_tx() error path
      wifi: ath12k: Add missing htt_metadata flag in ath12k_dp_tx()
Nicolas Frattaroli (1): arm64: dts: rockchip: remove ethm0_clk0_25m_out from Sige5 gmac0
Nikita Shubin (1): ntb: intel: Fix using link status DB's
Nikita Zhandarovich (3):
      wifi: mt76: mt7915: fix possible integer overflows in mt7915_muru_stats_show()
      mfd: sm501: Switch to BIT() to mitigate integer overflows
      media: vimc: skip .s_stream() for stopped entities
Niklas Cassel (5):
      ata: libata: Fix NCQ Non-Data log not supported print
      nvmet: pci-epf: Always configure BAR0 as 64-bit
      misc: pci_endpoint_test: Fix pci_endpoint_test_bars_read_bar() error handling
      misc: pci_endpoint_test: Handle BAR sizes larger than INT_MAX
      PCI: endpoint: pci-epf-test: Handle endianness properly
Niklas Neronin (1): usb: xhci: correct debug message page size calculation
Niklas Schnelle (1): s390: Remove ioremap_wt() and pgprot_writethrough()
Nishanth Aravamudan (1): PCI: Avoid reset when disabled via sysfs
Norbert Szetei (3):
      ksmbd: add bounds check for create lease context
      ksmbd: fix overflow in dacloffset bounds check
      ksmbd: validate zero num_subauth before sub_auth is accessed
Nuno Sá (1): iio: backend: make sure to NULL terminate stack buffer
Ojaswin Mujoo (2):
      ext4: define ext4_journal_destroy wrapper
      ext4: avoid journaling sb update on error if journal is destroying
Oleg Nesterov (2):
      seccomp: fix the __secure_computing() stub for !HAVE_ARCH_SECCOMP_FILTER
      exec: fix the racy usage of fs_struct->in_exec
Oleksij Rempel (1): net: dsa: microchip: fix DCB apptrust configuration on KSZ88x3
Olga Kornievskaia (1): nfsd: fix management of listener transports
Olivia Mackintosh (1): ALSA: usb-audio: separate DJM-A9 cap lvl options
P Praneesh (1): wifi: ath11k: fix RCU stall while reaping monitor destination ring
Pablo Neira Ayuso (1): netfilter: nft_set_hash: GC reaps elements with conncount for dynamic sets only
Palmer Dabbelt (1): RISC-V: errata: Use medany for relocatable builds
Paolo Bonzini (1): KVM: x86: block KVM_CAP_SYNC_REGS if guest state is protected
Patrisious Haddad (1): RDMA/mlx5: Fix mlx5_poll_one() cur_qp update flow
Paul Menzel (2):
      scsi: mpt3sas: Reduce log level of ignore_delay_remove message to KERN_INFO
      ACPI: resource: Skip IRQ override on ASUS Vivobook 14 X1404VAP
Pavel Begunkov (2):
      io_uring: check for iowq alloc_workqueue failure
      io_uring: fix retry handling off iowq
Pedro Nishiyama (3):
      Bluetooth: Add quirk for broken READ_VOICE_SETTING
      Bluetooth: Add quirk for broken READ_PAGE_SCAN_TYPE
      Bluetooth: btusb: Fix regression in the initialization of fake Bluetooth controllers
Peng Fan (3):
      remoteproc: core: Clear table_sz when rproc_shutdown
      dmaengine: fsl-edma: cleanup chan after dma_async_device_unregister
      dmaengine: fsl-edma: free irq correctly in remove path
Peter Geis (1): clk: rockchip: rk3328: fix wrong clk_ref_usb3otg parent
Peter Zijlstra (1): lockdep/mm: Fix might_fault() lockdep check of current->mm->mmap_lock
Peter Zijlstra (Intel) (1): perf/x86/intel: Apply static call for drain_pebs
Ping-Ke Shih (2):
      wifi: rtw89: fw: correct debug message format in rtw89_build_txpwr_trk_tbl_from_elm()
      wifi: rtw89: pci: correct ISR RDU bit for 8922AE
Piotr Kwapulinski (1): ixgbe: fix media type detection for E610 device
Pranjal Shrivastava (1): net: Fix the devmem sock opts and msgs for parisc
Prathamesh Shete (1): pinctrl: tegra: Set SFIO mode to Mux Register
Przemek Kitszel (1): ice: health.c: fix compilation on gcc 7.5
Pu Lehui (2):
      riscv: fgraph: Select HAVE_FUNCTION_GRAPH_TRACER depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
      riscv: fgraph: Fix stack layout to match __arch_ftrace_regs argument of ftrace_return_to_handler
Qasim Ijaz (2):
      isofs: fix KMSAN uninit-value bug in do_isofs_readdir()
      jfs: fix slab-out-of-bounds read in ea_get()
Qiuxu Zhuo (5):
      EDAC/igen6: Fix the flood of invalid error reports
      EDAC/{skx_common,i10nm}: Fix some missing error reports on Emerald Rapids
      EDAC/ie31200: Fix the size of EDAC_MC_LAYER_CHIP_SELECT layer
      EDAC/ie31200: Fix the DIMM size mask for several SoCs
      EDAC/ie31200: Fix the error path order of ie31200_init()
Rafael J. Wysocki (2):
      PM: sleep: Adjust check before setting power.must_resume
      PM: sleep: Fix handling devices with direct_complete set on errors
Rameshkumar Sundaram (1): wifi: ath12k: Fix pdev lookup in WBM error processing
Ran Xiaokai (1): tracing/osnoise: Fix possible recursive locking for cpus_read_lock()
Remi Pommarel (1): leds: Fix LED_OFF brightness race
Richard Fitzgerald (1): firmware: cs_dsp: Ensure cs_dsp_load[_coeff]() returns 0 on success
Ritu Chaudhary (1): ASoC: tegra: Use non-atomic timeout for ADX status register
Rob Clark (1): drm/msm/a6xx: Fix a6xx indexed-regs in devcoreduump
Robin Murphy (2):
      iommu: Handle race with default domain setup
      media: omap3isp: Handle ARM dma_iommu_mapping
Robin van der Gracht (1): can: rockchip_canfd: rkcanfd_chip_fifo_setup(): remove duplicated setup of RX FIFO
Roman Gushchin (1): RDMA/core: Don't expose hw_counters outside of init net namespace
Roman Smirnov (1): jfs: add index corruption check to DT_GETPAGE()
Saket Kumar Bhaskar (1): selftests/bpf: Select NUMA_NO_NODE to create map
Saleemkhan Jamadar (1): drm/amdgpu/umsch: remove vpe test from umsch
Sankararaman Jayaraman (1): vmxnet3: unregister xdp rxq info in the reset path
Sathishkumar Muruganandam (1): wifi: ath12k: encode max Tx power in scan channel list command
Sean Christopherson (1): KVM: SVM: Don't change target vCPU state on AP Creation VMGEXIT error
Sebastian Andrzej Siewior (1): lockdep: Don't disable interrupts on RT in disable_irq_nosync_lockdep.*()
Shay Drory (1): PCI: Fix NULL dereference in SR-IOV VF creation error path
Sherry Sun (4):
      tty: serial: fsl_lpuart: Use u32 and u8 for register variables
      tty: serial: fsl_lpuart: use port struct directly to simply code
      tty: serial: fsl_lpuart: Fix unused variable 'sport' build warning
      tty: serial: lpuart: only disable CTS instead of overwriting the whole UARTMODIR register
Shuai Xue (1): x86/mce: use is_copy_from_user() to determine copy-from-user context
Sicelo A. Mhlongo (1): power: supply: bq27xxx_battery: do not update cached flags prematurely
Song Yoong Siang (3):
      xsk: Add launch time hardware offload support to XDP Tx metadata
      igc: Refactor empty frame insertion for launch time support
      igc: Add launch time support to XDP ZC
Sourabh Jain (1): kexec: initialize ELF lowest address to ULONG_MAX
Srinivas Pandruvada (1): platform/x86: ISST: Correct command storage data length
Srinivasan Shanmugam (2):
      drm/amdgpu: Replace Mutex with Spinlock for RLCG register access to avoid Priority Inversion in SRIOV
      drm/amdkfd: Fix Circular Locking Dependency in 'svm_range_cpu_invalidate_pagetables'
Stanislav Spassov (1): x86/fpu: Fix guest FPU state buffer allocation size
Stanley Chu (1): i3c: master: svc: Fix missing the IBI rules
Stefan Eichenberger (1): arm64: dts: ti: k3-am62-verdin-dahlia: add Microphone Jack to sound card
Stefan Wahren (3):
      staging: vchiq_arm: Register debugfs after cdev
      staging: vchiq_arm: Fix possible NPR of keep-alive thread
      staging: vchiq_arm: Stop kthreads if vchiq cdev register fails
Stefano Garzarella (1): vsock: avoid timeout during connect() if the socket is closing
Stephen Brennan (1): perf dso: fix dso__is_kallsyms() check
Steven Price (1): drm/panthor: Clean up FW version information display
Steven Rostedt (2):
      tracing: Verify event formats that have "%*p.."
      tracing: Do not use PERF enums when perf is not defined
Su Yue (1): md/md-bitmap: fix wrong bitmap_limit for clustermd when write sb
Sudeep Holla (4):
      firmware: arm_ffa: Unregister the FF-A devices when cleaning up the partitions
      firmware: arm_ffa: Explicitly cast return value from FFA_VERSION before comparison
      firmware: arm_ffa: Explicitly cast return value from NOTIFICATION_INFO_GET
      firmware: arm_ffa: Skip the first/partition ID when parsing vCPU list
Sungjong Seo (2):
      exfat: fix random stack corruption after get_block
      exfat: fix potential wrong error return from get_block
Suzuki K Poulose (3):
      dma: Fix encryption bit clearing for dma_to_phys
      dma: Introduce generic dma_addr_*crypted helpers
      arm64: realm: Use aliased addresses for device DMA to shared buffers
Sven Schnelle (1): s390/entry: Fix setting _CIF_MCCK_GUEST with lowcore relocation
Taehee Yoo (1): eth: bnxt: fix out-of-range access of vnic_info array
Takashi Iwai (5):
      ALSA: hda/realtek: Always honor no_shutup_pins
      ALSA: timer: Don't take register_mutex with copy_from/to_user()
      ALSA: hda/realtek: Fix built-in mic assignment on ASUS VivoBook X515UA
      ALSA: hda/realtek: Fix built-in mic breakage on ASUS VivoBook X515JA
      ALSA: hda/realtek: Fix built-in mic on another ASUS VivoBook model
Tang Yizhou (2):
      writeback: let trace_balance_dirty_pages() take struct dtc as parameter
      writeback: fix calculations in trace_balance_dirty_pages() for cgwb
Tanya Agarwal (1): lib: 842: Improve error handling in sw842_compress()
Tao Chen (1): perf/ring_buffer: Allow the EPOLLRDNORM flag for poll
Tengda Wu (2):
      selftests/bpf: Fix freplace_link segfault in tailcalls prog test
      tracing: Fix use-after-free in print_graph_function_flags during tracer switching
Thadeu Lima de Souza Cascardo (2):
      dlm: prevent NPD when writing a positive value to event_done
      drm/amd/display: avoid NPD when ASIC does not support DMUB
Theodore Ts'o (1): ext4: don't over-report free space or inodes in statvfs
Thippeswamy Havalige (1): PCI: xilinx-cpm: Fix IRQ domain leak in error path of probe
Thomas Richter (3):
      perf test: Fix Hwmon PMU test endianess issue
      perf bench: Fix perf bench syscall loop count
      perf pmu: Handle memory failure in tool_pmu__new()
Thomas Weißschuh (2):
      x86/vdso: Fix latent bug in vclock_pages calculation
      leds: st1202: Check for error code from devm_mutex_init() call
Thorsten Blum (1): m68k: sun3: Use str_read_write() helper in mmu_emu_handle_fault()
Tianchen Ding (1): sched/eevdf: Force propagating min_slice of cfs_rq when {en,de}queue tasks
Tianyu Lan (1): x86/hyperv: Fix check of return value from snp_set_vmsa()
Tiezhu Yang (3):
      objtool: Handle various symbol types of rodata
      objtool: Handle different entry size of rodata
      objtool: Handle PC relative relocation type
Tim Schumacher (1): selinux: Chain up tool resolving errors in install_policy.sh
Tingbo Liao (1): riscv: Fix the __riscv_copy_vec_words_unaligned implementation
Tobias Waldekranz (1): net: mvpp2: Prevent parser TCAM memory corruption
Tom Rini (1): ARM: dts: omap4-panda-a4: Add missing model and compatible properties
Tomas Glozar (1): tools/rv: Keep user LDFLAGS in build
Tomi Valkeinen (1): drm: xlnx: zynqmp: Fix max dma segment size
Tony Ambardar (2):
      selftests/bpf: Fix runqslower cross-endian build
      libbpf: Fix accessing BTF.ext core_relo header
Trond Myklebust (5):
      NFSv4: Don't trigger uneccessary scans for return-on-close delegations
      NFSv4: Avoid unnecessary scans of filesystems for returning delegations
      NFSv4: Avoid unnecessary scans of filesystems for expired delegations
      NFSv4: Avoid unnecessary scans of filesystems for delayed delegations
      NFS: Shut down the nfs_client only after all the superblocks
Tushar Dave (1): PCI/ACS: Fix 'pci=config_acs=' parameter
Ulf Hansson (1): mmc: sdhci-omap: Disable MMC_CAP_AGGRESSIVE_PM for eMMC/SD
Uwe Kleine-König (8):
      iio: adc: ad7124: Micro-optimize channel disabling
      iio: adc: ad7124: Really disable all channels at probe time
      iio: adc: ad7173: Grab direct mode for calibration
      iio: adc: ad7192: Grab direct mode for calibration
      iio: adc: ad_sigma_delta: Disable channel after calibration
      iio: adc: ad4130: Fix comparison of channel setups
      iio: adc: ad7124: Fix comparison of channel configs
      iio: adc: ad7173: Fix comparison of channel configs
Vaibhav Jain (1): powerpc/perf: Fix ref-counting on the PMU 'vpa_pmu'
Vasant Hegde (1): iommu/amd: Fix header file
Vasiliy Kovalev (3):
      jfs: add check read-only before txBeginAnon() call
      jfs: add check read-only before truncation in jfs_truncate_nolock()
      ocfs2: validate l_tree_depth to avoid out-of-bounds access
Venkata Prasad Potturu (1): ASoC: amd: acp: Fix for enabling DMIC on acp platforms via _DSD entry
Veronika Molnarova (1): perf test stat_all_pmu.sh: Correctly check 'perf stat' result
Viktor Malik (1): selftests/bpf: Fix string read in strncmp benchmark
Viresh Kumar (1): firmware: arm_ffa: Refactor addition of partition information into XArray
Vishal Annapurve (1): x86/tdx: Fix arch_safe_halt() execution for TDX VMs
Vitalii Mordan (1): gpu: cdns-mhdp8546: fix call balance of mhdp->clk handling routines
Vitaliy Shevtsov (2):
      ASoC: cs35l41: check the return value from spi_setup()
      drm/amd/display: fix type mismatch in CalculateDynamicMetadataParameters()
Vitaly Kuznetsov (1): x86/entry: Add __init to ia32_emulation_override_cmdline()
Vitaly Lifshits (1): e1000e: change k1 configuration on MTP and later platforms
Vladimir Lypak (1): clk: qcom: gcc-msm8953: fix stuck venus0_core0 clock
Vladimir Oltean (3):
      net: dsa: sja1105: fix displaced ethtool statistics counters
      net: dsa: sja1105: reject other RX filters than HWTSTAMP_FILTER_PTP_V2_L2_EVENT
      net: dsa: sja1105: fix kasan out-of-bounds warning in sja1105_table_delete_entry()
WANG Rui (1): rust: Fix enabling Rust and building with GCC for LoongArch
Wang Liang (4):
      bonding: check xdp prog when set bond mode
      net: fix NULL pointer dereference in l3mdev_l3_rcv
      RDMA/core: Fix use-after-free when rename device name
      xsk: Fix __xsk_generic_xmit() error code when cq is full
Wang Zhaolong (1): smb: client: Fix netns refcount imbalance causing leaks and use-after-free
WangYuli (2):
      netfilter: nf_tables: Only use nf_skip_indirect_calls() when MITIGATION_RETPOLINE
      mlxsw: spectrum_acl_bloom_filter: Workaround for some LLVM versions
Wayne Lin (1): drm/dp_mst: Fix drm RAD print
Wen Gong (1): wifi: ath11k: update channel list in reg notifier instead reg worker
Wenkai Lin (3):
      crypto: hisilicon/sec2 - fix for aead authsize alignment
      crypto: hisilicon/sec2 - fix for sec spec check
      crypto: hisilicon/sec2 - fix for aead auth key length
Wentao Guan (1): Bluetooth: HCI: Add definition of hci_rp_remote_name_req_cancel
Wentao Liang (1): greybus: gb-beagleplay: Add error handling for gb_greybus_init
Will McVicker (1): clk: samsung: Fix UBSAN panic in samsung_clk_init()
Xiao Ni (1): md/raid10: wait barrier before returning discard request with REQ_NOWAIT
Xingui Yang (1): scsi: hisi_sas: Fixed failure to issue vendor specific commands
Xueqi Zhang (1): memory: mtk-smi: Add ostd setting for mt8192
Yajun Deng (1): ntb_hw_switchtec: Fix shift-out-of-bounds in switchtec_ntb_mw_set_trans
Yang Wang (1): drm/amdgpu: refine smu send msg debug log format
Yao Zi (2):
      arm64: dts: rockchip: Fix PWM pinctrl names
      riscv/kexec_file: Handle R_RISCV_64 in purgatory relocator
Ye Bin (5):
      ext4: introduce ITAIL helper
      ext4: fix out-of-bound read in ext4_xattr_inode_dec_ref_all()
      fs/ntfs3: Factor out ntfs_{create/remove}_procdir()
      fs/ntfs3: Factor out ntfs_{create/remove}_proc_root()
      fs/ntfs3: Fix 'proc_info_root' leak when init ntfs failed
Yeoreum Yun (1): perf/core: Fix child_total_time_enabled accounting bug at task exit
Yi Lai (1): selftests/pcie_bwctrl: Add 'set_pcie_speed.sh' to TEST_PROGS
Ying Lu (1): usbnet:fix NPE during rx_complete
Yosry Ahmed (1): mm: zswap: fix crypto_free_acomp() deadlock in zswap_cpu_comp_dead()
Yu Kuai (3):
      md: fix mddev uaf while iterating all_mddevs list
      md/raid1,raid10: don't ignore IO flags
      blk-throttle: fix lower bps rate by throtl_trim_slice()
Yu Zhang(Yuriy) (1): wifi: ath11k: fix wrong overriding for VHT Beamformee STS Capability
Yuanfang Zhang (1): coresight-etm4x: add isb() before reading the TRCSTATR
Yue Haibing (2):
      pinctrl: nuvoton: npcm8xx: Fix error handling in npcm8xx_gpio_fw()
      drm/xe: Fix unmet direct dependencies warning
Yuezhang Mo (2):
      exfat: fix the infinite loop in exfat_find_last_cluster()
      exfat: fix missing shutdown check
Yuli Wang (1): LoongArch: Rework the arch_kgdb_breakpoint() implementation
Yunhui Cui (1): iommu/vt-d: Fix system hang on reboot -f
Zdenek Bouska (1): igc: Fix TX drops in XDP ZC
Zhang Rui (2):
      tools/power turbostat: Allow Zero return value for some RAPL registers
      tools/power turbostat: Restore GFX sysfs fflush() call
Zhang Yi (2):
      jbd2: fix off-by-one while erasing journal
      jbd2: add a missing data flush during file and fs synchronization
Zheng Qixing (4):
      md/raid1: fix memory leak in raid1_run() if no active rdev
      badblocks: fix missing bad blocks on retry in _badblocks_check()
      badblocks: return boolean from badblocks_set() and badblocks_clear()
      badblocks: use sector_t instead of int to avoid truncation of badblocks length
Zijun Hu (1): of: property: Increase NR_FWNODE_REFERENCE_ARGS
xueqin Luo (1): thermal: core: Remove duplicate struct declaration
zihan zhou (1): sched: Cancel the slice protection of the idle entity
zuoqian (1): cpufreq: scpi: compare kHz instead of Hz
谢致邦 (XIE Zhibang) (2):
      staging: rtl8723bs: select CONFIG_CRYPTO_LIB_AES
      LoongArch: Fix help text of CMDLINE_EXTEND in Kconfig
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 5079ca6ce1d1..b5979832ddce 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -593,6 +593,8 @@ patternProperties: description: GlobalTop Technology, Inc. "^gmt,.*": description: Global Mixed-mode Technology, Inc. + "^gocontroll,.*": + description: GOcontroll Modular Embedded Electronics B.V. "^goldelico,.*": description: Golden Delicious Computers GmbH & Co. KG "^goodix,.*": diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml index cbb544bd6c84..901b5afb3df0 100644 --- a/Documentation/netlink/specs/netdev.yaml +++ b/Documentation/netlink/specs/netdev.yaml @@ -70,6 +70,10 @@ definitions: name: tx-checksum doc: L3 checksum HW offload is supported by the driver. + - + name: tx-launch-time-fifo + doc: + Launch time HW offload is supported by the driver. - name: queue-type type: enum diff --git a/Documentation/netlink/specs/rt_route.yaml b/Documentation/netlink/specs/rt_route.yaml index a674103e5bc4..292469c7d4b9 100644 --- a/Documentation/netlink/specs/rt_route.yaml +++ b/Documentation/netlink/specs/rt_route.yaml @@ -80,165 +80,167 @@ definitions: attribute-sets: - name: route-attrs + name-prefix: rta- attributes: - - name: rta-dst + name: dst type: binary display-hint: ipv4 - - name: rta-src + name: src type: binary display-hint: ipv4 - - name: rta-iif + name: iif type: u32 - - name: rta-oif + name: oif type: u32 - - name: rta-gateway + name: gateway type: binary display-hint: ipv4 - - name: rta-priority + name: priority type: u32 - - name: rta-prefsrc + name: prefsrc type: binary display-hint: ipv4 - - name: rta-metrics + name: metrics type: nest - nested-attributes: rta-metrics + nested-attributes: metrics - - name: rta-multipath + name: multipath type: binary - - name: rta-protoinfo # not used + name: protoinfo # not used type: binary - - name: rta-flow + name: flow type: u32 - - name: rta-cacheinfo + name: cacheinfo type: binary struct: rta-cacheinfo - - name: rta-session # not used + name: session # not used type: binary - - name: rta-mp-algo # not used + name: mp-algo # not used type: binary - - name: rta-table + name: table type: u32 - - name: rta-mark + name: mark type: u32 - - name: rta-mfc-stats + name: mfc-stats type: binary - - name: rta-via + name: via type: binary - - name: rta-newdst + name: newdst type: binary - - name: rta-pref + name: pref type: u8 - - name: rta-encap-type + name: encap-type type: u16 - - name: rta-encap + name: encap type: binary # tunnel specific nest - - name: rta-expires + name: expires type: u32 - - name: rta-pad + name: pad type: binary - - name: rta-uid + name: uid type: u32 - - name: rta-ttl-propagate + name: ttl-propagate type: u8 - - name: rta-ip-proto + name: ip-proto type: u8 - - name: rta-sport + name: sport type: u16 - - name: rta-dport + name: dport type: u16 - - name: rta-nh-id + name: nh-id type: u32 - - name: rta-flowlabel + name: flowlabel type: u32 byte-order: big-endian display-hint: hex - - name: rta-metrics + name: metrics + name-prefix: rtax- attributes: - - name: rtax-unspec + name: unspec type: unused value: 0 - - name: rtax-lock + name: lock type: u32 - - name: rtax-mtu + name: mtu type: u32 - - name: rtax-window + name: window type: u32 - - name: rtax-rtt + name: rtt type: u32 - - name: rtax-rttvar + name: rttvar type: u32 - - name: rtax-ssthresh + name: ssthresh type: u32 - - 
name: rtax-cwnd + name: cwnd type: u32 - - name: rtax-advmss + name: advmss type: u32 - - name: rtax-reordering + name: reordering type: u32 - - name: rtax-hoplimit + name: hoplimit type: u32 - - name: rtax-initcwnd + name: initcwnd type: u32 - - name: rtax-features + name: features type: u32 - - name: rtax-rto-min + name: rto-min type: u32 - - name: rtax-initrwnd + name: initrwnd type: u32 - - name: rtax-quickack + name: quickack type: u32 - - name: rtax-cc-algo + name: cc-algo type: string - - name: rtax-fastopen-no-cookie + name: fastopen-no-cookie type: u32
operations: @@ -254,18 +256,18 @@ operations: value: 26 attributes: - rtm-family - - rta-src + - src - rtm-src-len - - rta-dst + - dst - rtm-dst-len - - rta-iif - - rta-oif - - rta-ip-proto - - rta-sport - - rta-dport - - rta-mark - - rta-uid - - rta-flowlabel + - iif + - oif + - ip-proto + - sport + - dport + - mark + - uid + - flowlabel reply: value: 24 attributes: &all-route-attrs @@ -278,34 +280,34 @@ operations: - rtm-scope - rtm-type - rtm-flags - - rta-dst - - rta-src - - rta-iif - - rta-oif - - rta-gateway - - rta-priority - - rta-prefsrc - - rta-metrics - - rta-multipath - - rta-flow - - rta-cacheinfo - - rta-table - - rta-mark - - rta-mfc-stats - - rta-via - - rta-newdst - - rta-pref - - rta-encap-type - - rta-encap - - rta-expires - - rta-pad - - rta-uid - - rta-ttl-propagate - - rta-ip-proto - - rta-sport - - rta-dport - - rta-nh-id - - rta-flowlabel + - dst + - src + - iif + - oif + - gateway + - priority + - prefsrc + - metrics + - multipath + - flow + - cacheinfo + - table + - mark + - mfc-stats + - via + - newdst + - pref + - encap-type + - encap + - expires + - pad + - uid + - ttl-propagate + - ip-proto + - sport + - dport + - nh-id + - flowlabel dump: request: value: 26 diff --git a/Documentation/networking/xsk-tx-metadata.rst b/Documentation/networking/xsk-tx-metadata.rst index e76b0cfc32f7..df53a10ccac3 100644 --- a/Documentation/networking/xsk-tx-metadata.rst +++ b/Documentation/networking/xsk-tx-metadata.rst @@ -50,6 +50,10 @@ The flags field enables the particular offload: checksum. ``csum_start`` specifies byte offset of where the checksumming should start and ``csum_offset`` specifies byte offset where the device should store the computed checksum. +- ``XDP_TXMD_FLAGS_LAUNCH_TIME``: requests the device to schedule the + packet for transmission at a pre-determined time called launch time. The + value of launch time is indicated by ``launch_time`` field of + ``union xsk_tx_metadata``.
Besides the flags above, in order to trigger the offloads, the first packet's ``struct xdp_desc`` descriptor should set ``XDP_TX_METADATA`` @@ -65,6 +69,63 @@ In this case, when running in ``XDK_COPY`` mode, the TX checksum is calculated on the CPU. Do not enable this option in production because it will negatively affect performance.
+Launch Time +=========== + +The value of the requested launch time should be based on the device's PTP +Hardware Clock (PHC) to ensure accuracy. AF_XDP takes a different data path +compared to the ETF queuing discipline, which organizes packets and delays +their transmission. Instead, AF_XDP immediately hands off the packets to +the device driver without rearranging their order or holding them prior to +transmission. Since the driver maintains FIFO behavior and does not perform +packet reordering, a packet with a launch time request will block other +packets in the same Tx Queue until it is sent. Therefore, it is recommended +to allocate separate queue for scheduling traffic that is intended for +future transmission. + +In scenarios where the launch time offload feature is disabled, the device +driver is expected to disregard the launch time request. For correct +interpretation and meaningful operation, the launch time should never be +set to a value larger than the farthest programmable time in the future +(the horizon). Different devices have different hardware limitations on the +launch time offload feature. + +stmmac driver +------------- + +For stmmac, TSO and launch time (TBS) features are mutually exclusive for +each individual Tx Queue. By default, the driver configures Tx Queue 0 to +support TSO and the rest of the Tx Queues to support TBS. The launch time +hardware offload feature can be enabled or disabled by using the tc-etf +command to call the driver's ndo_setup_tc() callback. + +The value of the launch time that is programmed in the Enhanced Normal +Transmit Descriptors is a 32-bit value, where the most significant 8 bits +represent the time in seconds and the remaining 24 bits represent the time +in 256 ns increments. The programmed launch time is compared against the +PTP time (bits[39:8]) and rolls over after 256 seconds. Therefore, the +horizon of the launch time for dwmac4 and dwxlgmac2 is 128 seconds in the +future. + +igc driver +---------- + +For igc, all four Tx Queues support the launch time feature. The launch +time hardware offload feature can be enabled or disabled by using the +tc-etf command to call the driver's ndo_setup_tc() callback. When entering +TSN mode, the igc driver will reset the device and create a default Qbv +schedule with a 1-second cycle time, with all Tx Queues open at all times. + +The value of the launch time that is programmed in the Advanced Transmit +Context Descriptor is a relative offset to the starting time of the Qbv +transmission window of the queue. The Frst flag of the descriptor can be +set to schedule the packet for the next Qbv cycle. Therefore, the horizon +of the launch time for i225 and i226 is the ending time of the next cycle +of the Qbv transmission window of the queue. For example, when the Qbv +cycle time is set to 1 second, the horizon of the launch time ranges +from 1 second to 2 seconds, depending on where the Qbv cycle is currently +running. + Querying Device Capabilities ============================
@@ -74,6 +135,7 @@ Refer to ``xsk-flags`` features bitmask in
- ``tx-timestamp``: device supports ``XDP_TXMD_FLAGS_TIMESTAMP`` - ``tx-checksum``: device supports ``XDP_TXMD_FLAGS_CHECKSUM`` +- ``tx-launch-time-fifo``: device supports ``XDP_TXMD_FLAGS_LAUNCH_TIME``
See ``tools/net/ynl/samples/netdev.c`` on how to query this information.
diff --git a/Makefile b/Makefile index 3ede59c1146c..907a4565f06a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 14 -SUBLEVEL = 1 +SUBLEVEL = 2 EXTRAVERSION = NAME = Baby Opossum Posse
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 835b5f100e92..f3f6b7a33b79 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -121,7 +121,7 @@ config ARM select HAVE_KERNEL_XZ select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M select HAVE_KRETPROBES if HAVE_KPROBES - select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD) + select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY) select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_OPTPROBES if !THUMB2_KERNEL diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts index f2a5f17f312e..2e7b96e7b791 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts @@ -6,8 +6,9 @@
/dts-v1/;
-#include "imx6ul-tqma6ul1.dtsi" +#include "imx6ul-tqma6ul2.dtsi" #include "mba6ulx.dtsi" +#include "imx6ul-tqma6ul1.dtsi"
/ { model = "TQ-Systems TQMa6UL1 SoM on MBa6ULx board"; diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi index 24192d012ef7..79c8c5529135 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi @@ -4,8 +4,6 @@ * Author: Markus Niebel Markus.Niebel@tq-group.com */
-#include "imx6ul-tqma6ul2.dtsi" - / { model = "TQ-Systems TQMa6UL1 SoM"; compatible = "tq,imx6ul-tqma6ul1", "fsl,imx6ul"; diff --git a/arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts b/arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts index 8fd076e5d1b0..4b8bfd0188ad 100644 --- a/arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts +++ b/arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts @@ -7,6 +7,11 @@ #include "omap443x.dtsi" #include "omap4-panda-common.dtsi"
+/ { + model = "TI OMAP4 PandaBoard (A4)"; + compatible = "ti,omap4-panda-a4", "ti,omap4-panda", "ti,omap4430", "ti,omap4"; +}; + /* Pandaboard Rev A4+ have external pullups on SCL & SDA */ &dss_hdmi_pins { pinctrl-single,pins = < diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h index d60f6e83a9f7..14811b4f48ec 100644 --- a/arch/arm/include/asm/vmlinux.lds.h +++ b/arch/arm/include/asm/vmlinux.lds.h @@ -34,6 +34,12 @@ #define NOCROSSREFS #endif
+#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY +#define OVERLAY_KEEP(x) KEEP(x) +#else +#define OVERLAY_KEEP(x) x +#endif + /* Set start/end symbol names to the LMA for the section */ #define ARM_LMA(sym, section) \ sym##_start = LOADADDR(section); \ @@ -125,13 +131,13 @@ __vectors_lma = .; \ OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \ .vectors { \ - *(.vectors) \ + OVERLAY_KEEP(*(.vectors)) \ } \ .vectors.bhb.loop8 { \ - *(.vectors.bhb.loop8) \ + OVERLAY_KEEP(*(.vectors.bhb.loop8)) \ } \ .vectors.bhb.bpiall { \ - *(.vectors.bhb.bpiall) \ + OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \ } \ } \ ARM_LMA(__vectors, .vectors); \ diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi index 59813ef8e2bb..7ae686d37dda 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi @@ -163,6 +163,19 @@ reg_vsd_3v3: regulator-vsd-3v3 { }; };
+/* + * Board is passively cooled and heatsink is specced for continuous operation + * at 1.2 GHz only. Short bouts of 1.6 GHz are ok, but these should be done + * intentionally, not as part of suspend/resume cycles. + */ +&{/opp-table/opp-1600000000} { + /delete-property/ opp-suspend; +}; + +&{/opp-table/opp-1800000000} { + /delete-property/ opp-suspend; +}; + &A53_0 { cpu-supply = <®_vdd_arm>; }; @@ -247,20 +260,20 @@ reg_vdd_soc: BUCK1 {
reg_vdd_arm: BUCK2 { regulator-name = "VDD_ARM"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <2187500>; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <1000000>; vin-supply = <®_5v_p>; regulator-boot-on; regulator-always-on; regulator-ramp-delay = <3125>; - nxp,dvs-run-voltage = <950000>; + nxp,dvs-run-voltage = <850000>; nxp,dvs-standby-voltage = <850000>; };
reg_vdd_3v3: BUCK4 { regulator-name = "VDD_3V3"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <3400000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; vin-supply = <®_5v_p>; regulator-boot-on; regulator-always-on; @@ -268,8 +281,8 @@ reg_vdd_3v3: BUCK4 {
reg_vdd_1v8: BUCK5 { regulator-name = "VDD_1V8"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <3400000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; vin-supply = <®_5v_p>; regulator-boot-on; regulator-always-on; @@ -277,8 +290,8 @@ reg_vdd_1v8: BUCK5 {
reg_nvcc_dram_1v1: BUCK6 { regulator-name = "NVCC_DRAM_1V1"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <3400000>; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1100000>; vin-supply = <®_5v_p>; regulator-boot-on; regulator-always-on; @@ -286,8 +299,8 @@ reg_nvcc_dram_1v1: BUCK6 {
reg_nvcc_snvs_1v8: LDO1 { regulator-name = "NVCC_SNVS_1V8"; - regulator-min-microvolt = <1600000>; - regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; vin-supply = <®_5v_p>; regulator-boot-on; regulator-always-on; @@ -295,8 +308,8 @@ reg_nvcc_snvs_1v8: LDO1 {
reg_vdda_1v8: LDO3 { regulator-name = "VDDA_1V8"; - regulator-min-microvolt = <800000>; - regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; vin-supply = <®_5v_p>; regulator-boot-on; regulator-always-on; diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index e0d3b8cba221..54147bce3b83 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -834,7 +834,7 @@ pgc_audio: power-domain@5 { assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>, <&clk IMX8MP_SYS_PLL1_800M>; assigned-clock-rates = <400000000>, - <600000000>; + <800000000>; };
pgc_gpu2d: power-domain@6 { @@ -1619,10 +1619,11 @@ audio_blk_ctrl: clock-controller@30e20000 { <&clk IMX8MP_CLK_SAI3>, <&clk IMX8MP_CLK_SAI5>, <&clk IMX8MP_CLK_SAI6>, - <&clk IMX8MP_CLK_SAI7>; + <&clk IMX8MP_CLK_SAI7>, + <&clk IMX8MP_CLK_AUDIO_AXI_ROOT>; clock-names = "ahb", "sai1", "sai2", "sai3", - "sai5", "sai6", "sai7"; + "sai5", "sai6", "sai7", "axi"; power-domains = <&pgc_audio>; assigned-clocks = <&clk IMX8MP_AUDIO_PLL1>, <&clk IMX8MP_AUDIO_PLL2>; diff --git a/arch/arm64/boot/dts/mediatek/mt6359.dtsi b/arch/arm64/boot/dts/mediatek/mt6359.dtsi index 150ad84d5d2b..7b10f9c59819 100644 --- a/arch/arm64/boot/dts/mediatek/mt6359.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6359.dtsi @@ -15,7 +15,8 @@ pmic_adc: adc { #io-channel-cells = <1>; };
- mt6359codec: mt6359codec { + mt6359codec: audio-codec { + compatible = "mediatek,mt6359-codec"; };
regulators { diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi index b5d4b5baf478..0d995b342d46 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi @@ -925,8 +925,6 @@ &pwm0 { &pwrap { pmic: pmic { compatible = "mediatek,mt6397"; - #address-cells = <1>; - #size-cells = <1>; interrupts-extended = <&pio 11 IRQ_TYPE_LEVEL_HIGH>; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index 3458be7f7f61..0ca63e8c4e16 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -352,14 +352,14 @@ topckgen: clock-controller@10000000 { #clock-cells = <1>; };
- infracfg: power-controller@10001000 { + infracfg: clock-controller@10001000 { compatible = "mediatek,mt8173-infracfg", "syscon"; reg = <0 0x10001000 0 0x1000>; #clock-cells = <1>; #reset-cells = <1>; };
- pericfg: power-controller@10003000 { + pericfg: clock-controller@10003000 { compatible = "mediatek,mt8173-pericfg", "syscon"; reg = <0 0x10003000 0 0x1000>; #clock-cells = <1>; @@ -564,7 +564,7 @@ vpu: vpu@10020000 { memory-region = <&vpu_dma_reserved>; };
- sysirq: intpol-controller@10200620 { + sysirq: interrupt-controller@10200620 { compatible = "mediatek,mt8173-sysirq", "mediatek,mt6577-sysirq"; interrupt-controller; diff --git a/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts b/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts index 04e4a2f73799..612336713a64 100644 --- a/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts +++ b/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts @@ -8,1047 +8,16 @@ /dts-v1/;
#include "mt8188.dtsi" -#include "mt6359.dtsi" -#include <dt-bindings/gpio/gpio.h> -#include <dt-bindings/input/input.h> -#include <dt-bindings/interrupt-controller/irq.h> -#include <dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h> -#include <dt-bindings/regulator/mediatek,mt6360-regulator.h> -#include <dt-bindings/spmi/spmi.h> -#include <dt-bindings/usb/pd.h> +#include "mt8390-genio-common.dtsi"
/ { model = "MediaTek Genio-700 EVK"; compatible = "mediatek,mt8390-evk", "mediatek,mt8390", "mediatek,mt8188";
- aliases { - ethernet0 = ð - i2c0 = &i2c0; - i2c1 = &i2c1; - i2c2 = &i2c2; - i2c3 = &i2c3; - i2c4 = &i2c4; - i2c5 = &i2c5; - i2c6 = &i2c6; - mmc0 = &mmc0; - mmc1 = &mmc1; - serial0 = &uart0; - }; - - chosen { - stdout-path = "serial0:921600n8"; - }; - - firmware { - optee { - compatible = "linaro,optee-tz"; - method = "smc"; - }; - }; - memory@40000000 { device_type = "memory"; reg = <0 0x40000000 0x2 0x00000000>; }; - - reserved-memory { - #address-cells = <2>; - #size-cells = <2>; - ranges; - - /* - * 12 MiB reserved for OP-TEE (BL32) - * +-----------------------+ 0x43e0_0000 - * | SHMEM 2MiB | - * +-----------------------+ 0x43c0_0000 - * | | TA_RAM 8MiB | - * + TZDRAM +--------------+ 0x4340_0000 - * | | TEE_RAM 2MiB | - * +-----------------------+ 0x4320_0000 - */ - optee_reserved: optee@43200000 { - no-map; - reg = <0 0x43200000 0 0x00c00000>; - }; - - scp_mem: memory@50000000 { - compatible = "shared-dma-pool"; - reg = <0 0x50000000 0 0x2900000>; - no-map; - }; - - /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ - bl31_secmon_reserved: memory@54600000 { - no-map; - reg = <0 0x54600000 0x0 0x200000>; - }; - - apu_mem: memory@55000000 { - compatible = "shared-dma-pool"; - reg = <0 0x55000000 0 0x1400000>; /* 20 MB */ - }; - - vpu_mem: memory@57000000 { - compatible = "shared-dma-pool"; - reg = <0 0x57000000 0 0x1400000>; /* 20 MB */ - }; - - adsp_mem: memory@60000000 { - compatible = "shared-dma-pool"; - reg = <0 0x60000000 0 0xf00000>; - no-map; - }; - - afe_dma_mem: memory@60f00000 { - compatible = "shared-dma-pool"; - reg = <0 0x60f00000 0 0x100000>; - no-map; - }; - - adsp_dma_mem: memory@61000000 { - compatible = "shared-dma-pool"; - reg = <0 0x61000000 0 0x100000>; - no-map; - }; - }; - - common_fixed_5v: regulator-0 { - compatible = "regulator-fixed"; - regulator-name = "vdd_5v"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - gpio = <&pio 10 GPIO_ACTIVE_HIGH>; - enable-active-high; - regulator-always-on; - vin-supply = <®_vsys>; - }; - - edp_panel_fixed_3v3: regulator-1 { - compatible = "regulator-fixed"; - regulator-name = "vedp_3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - enable-active-high; - gpio = <&pio 15 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&edp_panel_3v3_en_pins>; - vin-supply = <®_vsys>; - }; - - gpio_fixed_3v3: regulator-2 { - compatible = "regulator-fixed"; - regulator-name = "ext_3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&pio 9 GPIO_ACTIVE_HIGH>; - enable-active-high; - regulator-always-on; - vin-supply = <®_vsys>; - }; - - /* system wide 4.2V power rail from charger */ - reg_vsys: regulator-vsys { - compatible = "regulator-fixed"; - regulator-name = "vsys"; - regulator-always-on; - regulator-boot-on; - }; - - /* used by mmc2 */ - sdio_fixed_1v8: regulator-3 { - compatible = "regulator-fixed"; - regulator-name = "vio18_conn"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - enable-active-high; - regulator-always-on; - }; - - /* used by mmc2 */ - sdio_fixed_3v3: regulator-4 { - compatible = "regulator-fixed"; - regulator-name = "wifi_3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&pio 74 GPIO_ACTIVE_HIGH>; - enable-active-high; - regulator-always-on; - vin-supply = <®_vsys>; - }; - - touch0_fixed_3v3: regulator-5 { - compatible = "regulator-fixed"; - regulator-name = "vio33_tp1"; - regulator-min-microvolt = <3300000>; - 
regulator-max-microvolt = <3300000>; - gpio = <&pio 119 GPIO_ACTIVE_HIGH>; - enable-active-high; - vin-supply = <®_vsys>; - }; - - usb_hub_fixed_3v3: regulator-6 { - compatible = "regulator-fixed"; - regulator-name = "vhub_3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&pio 112 GPIO_ACTIVE_HIGH>; /* HUB_3V3_EN */ - startup-delay-us = <10000>; - enable-active-high; - vin-supply = <®_vsys>; - }; - - usb_p0_vbus: regulator-7 { - compatible = "regulator-fixed"; - regulator-name = "vbus_p0"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - gpio = <&pio 84 GPIO_ACTIVE_HIGH>; - enable-active-high; - vin-supply = <®_vsys>; - }; - - usb_p1_vbus: regulator-8 { - compatible = "regulator-fixed"; - regulator-name = "vbus_p1"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - gpio = <&pio 87 GPIO_ACTIVE_HIGH>; - enable-active-high; - vin-supply = <®_vsys>; - }; - - /* used by ssusb2 */ - usb_p2_vbus: regulator-9 { - compatible = "regulator-fixed"; - regulator-name = "wifi_3v3"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - enable-active-high; - }; -}; - -&adsp { - memory-region = <&adsp_dma_mem>, <&adsp_mem>; - status = "okay"; -}; - -&afe { - memory-region = <&afe_dma_mem>; - status = "okay"; -}; - -&gpu { - mali-supply = <&mt6359_vproc2_buck_reg>; - status = "okay"; -}; - -&i2c0 { - pinctrl-names = "default"; - pinctrl-0 = <&i2c0_pins>; - clock-frequency = <400000>; - status = "okay"; - - touchscreen@5d { - compatible = "goodix,gt9271"; - reg = <0x5d>; - interrupt-parent = <&pio>; - interrupts-extended = <&pio 6 IRQ_TYPE_EDGE_RISING>; - irq-gpios = <&pio 6 GPIO_ACTIVE_HIGH>; - reset-gpios = <&pio 5 GPIO_ACTIVE_HIGH>; - AVDD28-supply = <&touch0_fixed_3v3>; - VDDIO-supply = <&mt6359_vio18_ldo_reg>; - pinctrl-names = "default"; - pinctrl-0 = <&touch_pins>; - }; -}; - -&i2c1 { - pinctrl-names = "default"; - pinctrl-0 = <&i2c1_pins>; - clock-frequency = <400000>; - status = "okay"; -}; - -&i2c2 { - pinctrl-names = "default"; - pinctrl-0 = <&i2c2_pins>; - clock-frequency = <400000>; - status = "okay"; -}; - -&i2c3 { - pinctrl-names = "default"; - pinctrl-0 = <&i2c3_pins>; - clock-frequency = <400000>; - status = "okay"; -}; - -&i2c4 { - pinctrl-names = "default"; - pinctrl-0 = <&i2c4_pins>; - clock-frequency = <1000000>; - status = "okay"; -}; - -&i2c5 { - pinctrl-names = "default"; - pinctrl-0 = <&i2c5_pins>; - clock-frequency = <400000>; - status = "okay"; -}; - -&i2c6 { - pinctrl-names = "default"; - pinctrl-0 = <&i2c6_pins>; - clock-frequency = <400000>; - status = "okay"; -}; - -&mfg0 { - domain-supply = <&mt6359_vproc2_buck_reg>; -}; - -&mfg1 { - domain-supply = <&mt6359_vsram_others_ldo_reg>; -}; - -&mmc0 { - status = "okay"; - pinctrl-names = "default", "state_uhs"; - pinctrl-0 = <&mmc0_default_pins>; - pinctrl-1 = <&mmc0_uhs_pins>; - bus-width = <8>; - max-frequency = <200000000>; - cap-mmc-highspeed; - mmc-hs200-1_8v; - mmc-hs400-1_8v; - supports-cqe; - cap-mmc-hw-reset; - no-sdio; - no-sd; - hs400-ds-delay = <0x1481b>; - vmmc-supply = <&mt6359_vemc_1_ldo_reg>; - vqmmc-supply = <&mt6359_vufs_ldo_reg>; - non-removable; -}; - -&mmc1 { - status = "okay"; - pinctrl-names = "default", "state_uhs"; - pinctrl-0 = <&mmc1_default_pins>; - pinctrl-1 = <&mmc1_uhs_pins>; - bus-width = <4>; - max-frequency = <200000000>; - cap-sd-highspeed; - sd-uhs-sdr50; - sd-uhs-sdr104; - no-mmc; - no-sdio; - cd-gpios = <&pio 2 GPIO_ACTIVE_LOW>; - vmmc-supply = <&mt6359_vpa_buck_reg>; 
- vqmmc-supply = <&mt6359_vsim1_ldo_reg>; -}; - -&mt6359_vbbck_ldo_reg { - regulator-always-on; -}; - -&mt6359_vcn18_ldo_reg { - regulator-name = "vcn18_pmu"; - regulator-always-on; -}; - -&mt6359_vcn33_2_bt_ldo_reg { - regulator-name = "vcn33_2_pmu"; - regulator-always-on; -}; - -&mt6359_vcore_buck_reg { - regulator-name = "dvdd_proc_l"; - regulator-always-on; -}; - -&mt6359_vgpu11_buck_reg { - regulator-name = "dvdd_core"; - regulator-always-on; -}; - -&mt6359_vpa_buck_reg { - regulator-name = "vpa_pmu"; - regulator-max-microvolt = <3100000>; -}; - -&mt6359_vproc2_buck_reg { - /* The name "vgpu" is required by mtk-regulator-coupler */ - regulator-name = "vgpu"; - regulator-min-microvolt = <550000>; - regulator-max-microvolt = <800000>; - regulator-coupled-with = <&mt6359_vsram_others_ldo_reg>; - regulator-coupled-max-spread = <6250>; -}; - -&mt6359_vpu_buck_reg { - regulator-name = "dvdd_adsp"; - regulator-always-on; -}; - -&mt6359_vrf12_ldo_reg { - regulator-name = "va12_abb2_pmu"; - regulator-always-on; -}; - -&mt6359_vsim1_ldo_reg { - regulator-name = "vsim1_pmu"; - regulator-enable-ramp-delay = <480>; -}; - -&mt6359_vsram_others_ldo_reg { - /* The name "vsram_gpu" is required by mtk-regulator-coupler */ - regulator-name = "vsram_gpu"; - regulator-min-microvolt = <750000>; - regulator-max-microvolt = <800000>; - regulator-coupled-with = <&mt6359_vproc2_buck_reg>; - regulator-coupled-max-spread = <6250>; -}; - -&mt6359_vufs_ldo_reg { - regulator-name = "vufs18_pmu"; - regulator-always-on; -}; - -&mt6359codec { - mediatek,mic-type-0 = <1>; /* ACC */ - mediatek,mic-type-1 = <3>; /* DCC */ -}; - -&pcie { - pinctrl-names = "default"; - pinctrl-0 = <&pcie_pins_default>; - status = "okay"; -}; - -&pciephy { - status = "okay"; -}; - -&pio { - audio_default_pins: audio-default-pins { - pins-cmd-dat { - pinmux = <PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI>, - <PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI>, - <PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0>, - <PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1>, - <PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0>, - <PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1>, - <PINMUX_GPIO107__FUNC_B0_I2SIN_MCK>, - <PINMUX_GPIO108__FUNC_B0_I2SIN_BCK>, - <PINMUX_GPIO109__FUNC_B0_I2SIN_WS>, - <PINMUX_GPIO110__FUNC_I0_I2SIN_D0>, - <PINMUX_GPIO114__FUNC_O_I2SO2_MCK>, - <PINMUX_GPIO115__FUNC_B0_I2SO2_BCK>, - <PINMUX_GPIO116__FUNC_B0_I2SO2_WS>, - <PINMUX_GPIO117__FUNC_O_I2SO2_D0>, - <PINMUX_GPIO118__FUNC_O_I2SO2_D1>, - <PINMUX_GPIO121__FUNC_B0_PCM_CLK>, - <PINMUX_GPIO122__FUNC_B0_PCM_SYNC>, - <PINMUX_GPIO124__FUNC_I0_PCM_DI>, - <PINMUX_GPIO125__FUNC_O_DMIC1_CLK>, - <PINMUX_GPIO126__FUNC_I0_DMIC1_DAT>, - <PINMUX_GPIO128__FUNC_O_DMIC2_CLK>, - <PINMUX_GPIO129__FUNC_I0_DMIC2_DAT>; - }; - }; - - dptx_pins: dptx-pins { - pins-cmd-dat { - pinmux = <PINMUX_GPIO46__FUNC_I0_DP_TX_HPD>; - bias-pull-up; - }; - }; - - edp_panel_3v3_en_pins: edp-panel-3v3-en-pins { - pins1 { - pinmux = <PINMUX_GPIO15__FUNC_B_GPIO15>; - output-high; - }; - }; - - eth_default_pins: eth-default-pins { - pins-cc { - pinmux = <PINMUX_GPIO139__FUNC_B0_GBE_TXC>, - <PINMUX_GPIO140__FUNC_I0_GBE_RXC>, - <PINMUX_GPIO141__FUNC_I0_GBE_RXDV>, - <PINMUX_GPIO142__FUNC_O_GBE_TXEN>; - drive-strength = <8>; - }; - - pins-mdio { - pinmux = <PINMUX_GPIO143__FUNC_O_GBE_MDC>, - <PINMUX_GPIO144__FUNC_B1_GBE_MDIO>; - drive-strength = <8>; - input-enable; - }; - - pins-power { - pinmux = <PINMUX_GPIO145__FUNC_B_GPIO145>, - <PINMUX_GPIO146__FUNC_B_GPIO146>; - output-high; - }; - - pins-rxd { - pinmux = <PINMUX_GPIO135__FUNC_I0_GBE_RXD3>, - 
<PINMUX_GPIO136__FUNC_I0_GBE_RXD2>, - <PINMUX_GPIO137__FUNC_I0_GBE_RXD1>, - <PINMUX_GPIO138__FUNC_I0_GBE_RXD0>; - drive-strength = <8>; - }; - - pins-txd { - pinmux = <PINMUX_GPIO131__FUNC_O_GBE_TXD3>, - <PINMUX_GPIO132__FUNC_O_GBE_TXD2>, - <PINMUX_GPIO133__FUNC_O_GBE_TXD1>, - <PINMUX_GPIO134__FUNC_O_GBE_TXD0>; - drive-strength = <8>; - }; - }; - - eth_sleep_pins: eth-sleep-pins { - pins-cc { - pinmux = <PINMUX_GPIO139__FUNC_B_GPIO139>, - <PINMUX_GPIO140__FUNC_B_GPIO140>, - <PINMUX_GPIO141__FUNC_B_GPIO141>, - <PINMUX_GPIO142__FUNC_B_GPIO142>; - }; - - pins-mdio { - pinmux = <PINMUX_GPIO143__FUNC_B_GPIO143>, - <PINMUX_GPIO144__FUNC_B_GPIO144>; - input-disable; - bias-disable; - }; - - pins-rxd { - pinmux = <PINMUX_GPIO135__FUNC_B_GPIO135>, - <PINMUX_GPIO136__FUNC_B_GPIO136>, - <PINMUX_GPIO137__FUNC_B_GPIO137>, - <PINMUX_GPIO138__FUNC_B_GPIO138>; - }; - - pins-txd { - pinmux = <PINMUX_GPIO131__FUNC_B_GPIO131>, - <PINMUX_GPIO132__FUNC_B_GPIO132>, - <PINMUX_GPIO133__FUNC_B_GPIO133>, - <PINMUX_GPIO134__FUNC_B_GPIO134>; - }; - }; - - i2c0_pins: i2c0-pins { - pins { - pinmux = <PINMUX_GPIO56__FUNC_B1_SDA0>, - <PINMUX_GPIO55__FUNC_B1_SCL0>; - bias-pull-up = <MTK_PULL_SET_RSEL_011>; - drive-strength-microamp = <1000>; - }; - }; - - i2c1_pins: i2c1-pins { - pins { - pinmux = <PINMUX_GPIO58__FUNC_B1_SDA1>, - <PINMUX_GPIO57__FUNC_B1_SCL1>; - bias-pull-up = <MTK_PULL_SET_RSEL_011>; - drive-strength-microamp = <1000>; - }; - }; - - i2c2_pins: i2c2-pins { - pins { - pinmux = <PINMUX_GPIO60__FUNC_B1_SDA2>, - <PINMUX_GPIO59__FUNC_B1_SCL2>; - bias-pull-up = <MTK_PULL_SET_RSEL_011>; - drive-strength-microamp = <1000>; - }; - }; - - i2c3_pins: i2c3-pins { - pins { - pinmux = <PINMUX_GPIO62__FUNC_B1_SDA3>, - <PINMUX_GPIO61__FUNC_B1_SCL3>; - bias-pull-up = <MTK_PULL_SET_RSEL_011>; - drive-strength-microamp = <1000>; - }; - }; - - i2c4_pins: i2c4-pins { - pins { - pinmux = <PINMUX_GPIO64__FUNC_B1_SDA4>, - <PINMUX_GPIO63__FUNC_B1_SCL4>; - bias-pull-up = <MTK_PULL_SET_RSEL_011>; - drive-strength-microamp = <1000>; - }; - }; - - i2c5_pins: i2c5-pins { - pins { - pinmux = <PINMUX_GPIO66__FUNC_B1_SDA5>, - <PINMUX_GPIO65__FUNC_B1_SCL5>; - bias-pull-up = <MTK_PULL_SET_RSEL_011>; - drive-strength-microamp = <1000>; - }; - }; - - i2c6_pins: i2c6-pins { - pins { - pinmux = <PINMUX_GPIO68__FUNC_B1_SDA6>, - <PINMUX_GPIO67__FUNC_B1_SCL6>; - bias-pull-up = <MTK_PULL_SET_RSEL_011>; - drive-strength-microamp = <1000>; - }; - }; - - gpio_key_pins: gpio-key-pins { - pins { - pinmux = <PINMUX_GPIO42__FUNC_B1_KPCOL0>, - <PINMUX_GPIO43__FUNC_B1_KPCOL1>, - <PINMUX_GPIO44__FUNC_B1_KPROW0>; - }; - }; - - mmc0_default_pins: mmc0-default-pins { - pins-clk { - pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>; - drive-strength = <6>; - bias-pull-down = <MTK_PUPD_SET_R1R0_10>; - }; - - pins-cmd-dat { - pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>, - <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>, - <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>, - <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>, - <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>, - <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>, - <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>, - <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>, - <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>; - input-enable; - drive-strength = <6>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - - pins-rst { - pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>; - drive-strength = <6>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - }; - - mmc0_uhs_pins: mmc0-uhs-pins { - pins-clk { - pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>; - drive-strength = <8>; - bias-pull-down = <MTK_PUPD_SET_R1R0_10>; - 
}; - - pins-cmd-dat { - pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>, - <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>, - <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>, - <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>, - <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>, - <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>, - <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>, - <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>, - <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>; - input-enable; - drive-strength = <8>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - - pins-ds { - pinmux = <PINMUX_GPIO162__FUNC_B0_MSDC0_DSL>; - drive-strength = <8>; - bias-pull-down = <MTK_PUPD_SET_R1R0_10>; - }; - - pins-rst { - pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>; - drive-strength = <8>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - }; - - mmc1_default_pins: mmc1-default-pins { - pins-clk { - pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>; - drive-strength = <6>; - bias-pull-down = <MTK_PUPD_SET_R1R0_10>; - }; - - pins-cmd-dat { - pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>, - <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>, - <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>, - <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>, - <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>; - input-enable; - drive-strength = <6>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - - pins-insert { - pinmux = <PINMUX_GPIO2__FUNC_B_GPIO2>; - bias-pull-up; - }; - }; - - mmc1_uhs_pins: mmc1-uhs-pins { - pins-clk { - pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>; - drive-strength = <6>; - bias-pull-down = <MTK_PUPD_SET_R1R0_10>; - }; - - pins-cmd-dat { - pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>, - <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>, - <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>, - <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>, - <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>; - input-enable; - drive-strength = <6>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - }; - - mmc2_default_pins: mmc2-default-pins { - pins-clk { - pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>; - drive-strength = <4>; - bias-pull-down = <MTK_PUPD_SET_R1R0_10>; - }; - - pins-cmd-dat { - pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>, - <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>, - <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>, - <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>, - <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>; - input-enable; - drive-strength = <6>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - - pins-pcm { - pinmux = <PINMUX_GPIO123__FUNC_O_PCM_DO>; - }; - }; - - mmc2_uhs_pins: mmc2-uhs-pins { - pins-clk { - pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>; - drive-strength = <4>; - bias-pull-down = <MTK_PUPD_SET_R1R0_10>; - }; - - pins-cmd-dat { - pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>, - <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>, - <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>, - <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>, - <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>; - input-enable; - drive-strength = <6>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - }; - - mmc2_eint_pins: mmc2-eint-pins { - pins-dat1 { - pinmux = <PINMUX_GPIO172__FUNC_B_GPIO172>; - input-enable; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - }; - - mmc2_dat1_pins: mmc2-dat1-pins { - pins-dat1 { - pinmux = <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>; - input-enable; - drive-strength = <6>; - bias-pull-up = <MTK_PUPD_SET_R1R0_01>; - }; - }; - - panel_default_pins: panel-default-pins { - pins-dcdc { - pinmux = <PINMUX_GPIO45__FUNC_B_GPIO45>; - output-low; - }; - - pins-en { - pinmux = <PINMUX_GPIO111__FUNC_B_GPIO111>; - output-low; - }; - - pins-rst { - pinmux = <PINMUX_GPIO25__FUNC_B_GPIO25>; - output-high; - }; - }; - - pcie_pins_default: pcie-default { - mux { - pinmux = 
<PINMUX_GPIO47__FUNC_I1_WAKEN>, - <PINMUX_GPIO48__FUNC_O_PERSTN>, - <PINMUX_GPIO49__FUNC_B1_CLKREQN>; - bias-pull-up; - }; - }; - - rt1715_int_pins: rt1715-int-pins { - pins_cmd0_dat { - pinmux = <PINMUX_GPIO12__FUNC_B_GPIO12>; - bias-pull-up; - input-enable; - }; - }; - - spi0_pins: spi0-pins { - pins-spi { - pinmux = <PINMUX_GPIO69__FUNC_O_SPIM0_CSB>, - <PINMUX_GPIO70__FUNC_O_SPIM0_CLK>, - <PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI>, - <PINMUX_GPIO72__FUNC_B0_SPIM0_MISO>; - bias-disable; - }; - }; - - spi1_pins: spi1-pins { - pins-spi { - pinmux = <PINMUX_GPIO75__FUNC_O_SPIM1_CSB>, - <PINMUX_GPIO76__FUNC_O_SPIM1_CLK>, - <PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI>, - <PINMUX_GPIO78__FUNC_B0_SPIM1_MISO>; - bias-disable; - }; - }; - - spi2_pins: spi2-pins { - pins-spi { - pinmux = <PINMUX_GPIO79__FUNC_O_SPIM2_CSB>, - <PINMUX_GPIO80__FUNC_O_SPIM2_CLK>, - <PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI>, - <PINMUX_GPIO82__FUNC_B0_SPIM2_MISO>; - bias-disable; - }; - }; - - touch_pins: touch-pins { - pins-irq { - pinmux = <PINMUX_GPIO6__FUNC_B_GPIO6>; - input-enable; - bias-disable; - }; - - pins-reset { - pinmux = <PINMUX_GPIO5__FUNC_B_GPIO5>; - output-high; - }; - }; - - uart0_pins: uart0-pins { - pins { - pinmux = <PINMUX_GPIO31__FUNC_O_UTXD0>, - <PINMUX_GPIO32__FUNC_I1_URXD0>; - bias-pull-up; - }; - }; - - uart1_pins: uart1-pins { - pins { - pinmux = <PINMUX_GPIO33__FUNC_O_UTXD1>, - <PINMUX_GPIO34__FUNC_I1_URXD1>; - bias-pull-up; - }; - }; - - uart2_pins: uart2-pins { - pins { - pinmux = <PINMUX_GPIO35__FUNC_O_UTXD2>, - <PINMUX_GPIO36__FUNC_I1_URXD2>; - bias-pull-up; - }; - }; - - usb_default_pins: usb-default-pins { - pins-iddig { - pinmux = <PINMUX_GPIO83__FUNC_B_GPIO83>; - input-enable; - bias-pull-up; - }; - - pins-valid { - pinmux = <PINMUX_GPIO85__FUNC_I0_VBUSVALID>; - input-enable; - }; - - pins-vbus { - pinmux = <PINMUX_GPIO84__FUNC_O_USB_DRVVBUS>; - output-high; - }; - - }; - - usb1_default_pins: usb1-default-pins { - pins-valid { - pinmux = <PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P>; - input-enable; - }; - - pins-usb-hub-3v3-en { - pinmux = <PINMUX_GPIO112__FUNC_B_GPIO112>; - output-high; - }; - }; - - wifi_pwrseq_pins: wifi-pwrseq-pins { - pins-wifi-enable { - pinmux = <PINMUX_GPIO127__FUNC_B_GPIO127>; - output-low; - }; - }; -}; - -ð { - phy-mode ="rgmii-id"; - phy-handle = <ðernet_phy0>; - pinctrl-names = "default", "sleep"; - pinctrl-0 = <ð_default_pins>; - pinctrl-1 = <ð_sleep_pins>; - mediatek,mac-wol; - snps,reset-gpio = <&pio 147 GPIO_ACTIVE_HIGH>; - snps,reset-delays-us = <0 10000 10000>; - status = "okay"; -}; - -ð_mdio { - ethernet_phy0: ethernet-phy@1 { - compatible = "ethernet-phy-id001c.c916"; - reg = <0x1>; - }; -}; - -&pmic { - interrupt-parent = <&pio>; - interrupts = <222 IRQ_TYPE_LEVEL_HIGH>; - - mt6359keys: keys { - compatible = "mediatek,mt6359-keys"; - mediatek,long-press-mode = <1>; - power-off-time-sec = <0>; - - power-key { - linux,keycodes = <KEY_POWER>; - wakeup-source; - }; - }; -}; - -&scp { - memory-region = <&scp_mem>; - status = "okay"; -}; - -&sound { - compatible = "mediatek,mt8390-mt6359-evk", "mediatek,mt8188-mt6359-evb"; - model = "mt8390-evk"; - pinctrl-names = "default"; - pinctrl-0 = <&audio_default_pins>; - audio-routing = - "Headphone", "Headphone L", - "Headphone", "Headphone R"; - mediatek,adsp = <&adsp>; - status = "okay"; - - dai-link-0 { - link-name = "DL_SRC_BE"; - - codec { - sound-dai = <&pmic 0>; - }; - }; -}; - -&spi2 { - pinctrl-0 = <&spi2_pins>; - pinctrl-names = "default"; - mediatek,pad-select = <0>; - #address-cells = <1>; - #size-cells = <0>; - status = 
"okay"; };
-&uart0 { - pinctrl-0 = <&uart0_pins>; - pinctrl-names = "default"; - status = "okay"; -}; - -&uart1 { - pinctrl-0 = <&uart1_pins>; - pinctrl-names = "default"; - status = "okay"; -}; - -&uart2 { - pinctrl-0 = <&uart2_pins>; - pinctrl-names = "default"; - status = "okay"; -}; - -&u3phy0 { - status = "okay"; -}; - -&u3phy1 { - status = "okay"; -}; - -&u3phy2 { - status = "okay"; -}; - -&xhci0 { - status = "okay"; - vusb33-supply = <&mt6359_vusb_ldo_reg>; -}; - -&xhci1 { - status = "okay"; - vusb33-supply = <&mt6359_vusb_ldo_reg>; - #address-cells = <1>; - #size-cells = <0>; - - hub_2_0: hub@1 { - compatible = "usb451,8025"; - reg = <1>; - peer-hub = <&hub_3_0>; - reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>; - vdd-supply = <&usb_hub_fixed_3v3>; - }; - - hub_3_0: hub@2 { - compatible = "usb451,8027"; - reg = <2>; - peer-hub = <&hub_2_0>; - reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>; - vdd-supply = <&usb_hub_fixed_3v3>; - }; -}; - -&xhci2 { - status = "okay"; - vusb33-supply = <&mt6359_vusb_ldo_reg>; - vbus-supply = <&sdio_fixed_3v3>; /* wifi_3v3 */ -}; diff --git a/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi b/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi new file mode 100644 index 000000000000..e828864433a6 --- /dev/null +++ b/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi @@ -0,0 +1,1046 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (C) 2023 MediaTek Inc. + * Author: Chris Chen chris-qj.chen@mediatek.com + * Pablo Sun pablo.sun@mediatek.com + * Macpaul Lin macpaul.lin@mediatek.com + * + * Copyright (C) 2025 Collabora Ltd. + * Louis-Alexis Eyraud louisalexis.eyraud@collabora.com + * AngeloGioacchino Del Regno angelogioacchino.delregno@collabora.com + */ + +#include "mt6359.dtsi" +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h> +#include <dt-bindings/regulator/mediatek,mt6360-regulator.h> +#include <dt-bindings/spmi/spmi.h> +#include <dt-bindings/usb/pd.h> + +/ { + aliases { + ethernet0 = ð + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + i2c4 = &i2c4; + i2c5 = &i2c5; + i2c6 = &i2c6; + mmc0 = &mmc0; + mmc1 = &mmc1; + serial0 = &uart0; + }; + + chosen { + stdout-path = "serial0:921600n8"; + }; + + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* + * 12 MiB reserved for OP-TEE (BL32) + * +-----------------------+ 0x43e0_0000 + * | SHMEM 2MiB | + * +-----------------------+ 0x43c0_0000 + * | | TA_RAM 8MiB | + * + TZDRAM +--------------+ 0x4340_0000 + * | | TEE_RAM 2MiB | + * +-----------------------+ 0x4320_0000 + */ + optee_reserved: optee@43200000 { + no-map; + reg = <0 0x43200000 0 0x00c00000>; + }; + + scp_mem: memory@50000000 { + compatible = "shared-dma-pool"; + reg = <0 0x50000000 0 0x2900000>; + no-map; + }; + + /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ + bl31_secmon_reserved: memory@54600000 { + no-map; + reg = <0 0x54600000 0x0 0x200000>; + }; + + apu_mem: memory@55000000 { + compatible = "shared-dma-pool"; + reg = <0 0x55000000 0 0x1400000>; /* 20 MB */ + }; + + vpu_mem: memory@57000000 { + compatible = "shared-dma-pool"; + reg = <0 0x57000000 0 0x1400000>; /* 20 MB */ + }; + + adsp_mem: memory@60000000 { + compatible = "shared-dma-pool"; + reg = <0 0x60000000 0 0xf00000>; + no-map; + }; + + afe_dma_mem: memory@60f00000 { + compatible = "shared-dma-pool"; + reg = <0 
0x60f00000 0 0x100000>; + no-map; + }; + + adsp_dma_mem: memory@61000000 { + compatible = "shared-dma-pool"; + reg = <0 0x61000000 0 0x100000>; + no-map; + }; + }; + + common_fixed_5v: regulator-0 { + compatible = "regulator-fixed"; + regulator-name = "vdd_5v"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&pio 10 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + vin-supply = <®_vsys>; + }; + + edp_panel_fixed_3v3: regulator-1 { + compatible = "regulator-fixed"; + regulator-name = "vedp_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + enable-active-high; + gpio = <&pio 15 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&edp_panel_3v3_en_pins>; + vin-supply = <®_vsys>; + }; + + gpio_fixed_3v3: regulator-2 { + compatible = "regulator-fixed"; + regulator-name = "ext_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&pio 9 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + vin-supply = <®_vsys>; + }; + + /* system wide 4.2V power rail from charger */ + reg_vsys: regulator-vsys { + compatible = "regulator-fixed"; + regulator-name = "vsys"; + regulator-always-on; + regulator-boot-on; + }; + + /* used by mmc2 */ + sdio_fixed_1v8: regulator-3 { + compatible = "regulator-fixed"; + regulator-name = "vio18_conn"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + enable-active-high; + regulator-always-on; + }; + + /* used by mmc2 */ + sdio_fixed_3v3: regulator-4 { + compatible = "regulator-fixed"; + regulator-name = "wifi_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&pio 74 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + vin-supply = <®_vsys>; + }; + + touch0_fixed_3v3: regulator-5 { + compatible = "regulator-fixed"; + regulator-name = "vio33_tp1"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&pio 119 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <®_vsys>; + }; + + usb_hub_fixed_3v3: regulator-6 { + compatible = "regulator-fixed"; + regulator-name = "vhub_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&pio 112 GPIO_ACTIVE_HIGH>; /* HUB_3V3_EN */ + startup-delay-us = <10000>; + enable-active-high; + vin-supply = <®_vsys>; + }; + + usb_p0_vbus: regulator-7 { + compatible = "regulator-fixed"; + regulator-name = "vbus_p0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&pio 84 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <®_vsys>; + }; + + usb_p1_vbus: regulator-8 { + compatible = "regulator-fixed"; + regulator-name = "vbus_p1"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&pio 87 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <®_vsys>; + }; + + /* used by ssusb2 */ + usb_p2_vbus: regulator-9 { + compatible = "regulator-fixed"; + regulator-name = "vbus_p2"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + enable-active-high; + }; +}; + +&adsp { + memory-region = <&adsp_dma_mem>, <&adsp_mem>; + status = "okay"; +}; + +&afe { + memory-region = <&afe_dma_mem>; + status = "okay"; +}; + +&gpu { + mali-supply = <&mt6359_vproc2_buck_reg>; + status = "okay"; +}; + +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins>; + clock-frequency = <400000>; + status = "okay"; + + touchscreen@5d { + compatible = 
"goodix,gt9271"; + reg = <0x5d>; + interrupt-parent = <&pio>; + interrupts-extended = <&pio 6 IRQ_TYPE_EDGE_RISING>; + irq-gpios = <&pio 6 GPIO_ACTIVE_HIGH>; + reset-gpios = <&pio 5 GPIO_ACTIVE_HIGH>; + AVDD28-supply = <&touch0_fixed_3v3>; + VDDIO-supply = <&mt6359_vio18_ldo_reg>; + pinctrl-names = "default"; + pinctrl-0 = <&touch_pins>; + }; +}; + +&i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; + clock-frequency = <400000>; + status = "okay"; +}; + +&i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins>; + clock-frequency = <400000>; + status = "okay"; +}; + +&i2c3 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c3_pins>; + clock-frequency = <400000>; + status = "okay"; +}; + +&i2c4 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c4_pins>; + clock-frequency = <1000000>; + status = "okay"; +}; + +&i2c5 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c5_pins>; + clock-frequency = <400000>; + status = "okay"; +}; + +&i2c6 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c6_pins>; + clock-frequency = <400000>; + status = "okay"; +}; + +&mfg0 { + domain-supply = <&mt6359_vproc2_buck_reg>; +}; + +&mfg1 { + domain-supply = <&mt6359_vsram_others_ldo_reg>; +}; + +&mmc0 { + status = "okay"; + pinctrl-names = "default", "state_uhs"; + pinctrl-0 = <&mmc0_default_pins>; + pinctrl-1 = <&mmc0_uhs_pins>; + bus-width = <8>; + max-frequency = <200000000>; + cap-mmc-highspeed; + mmc-hs200-1_8v; + mmc-hs400-1_8v; + supports-cqe; + cap-mmc-hw-reset; + no-sdio; + no-sd; + hs400-ds-delay = <0x1481b>; + vmmc-supply = <&mt6359_vemc_1_ldo_reg>; + vqmmc-supply = <&mt6359_vufs_ldo_reg>; + non-removable; +}; + +&mmc1 { + status = "okay"; + pinctrl-names = "default", "state_uhs"; + pinctrl-0 = <&mmc1_default_pins>; + pinctrl-1 = <&mmc1_uhs_pins>; + bus-width = <4>; + max-frequency = <200000000>; + cap-sd-highspeed; + sd-uhs-sdr50; + sd-uhs-sdr104; + no-mmc; + no-sdio; + cd-gpios = <&pio 2 GPIO_ACTIVE_LOW>; + vmmc-supply = <&mt6359_vpa_buck_reg>; + vqmmc-supply = <&mt6359_vsim1_ldo_reg>; +}; + +&mt6359_vbbck_ldo_reg { + regulator-always-on; +}; + +&mt6359_vcn18_ldo_reg { + regulator-name = "vcn18_pmu"; + regulator-always-on; +}; + +&mt6359_vcn33_2_bt_ldo_reg { + regulator-name = "vcn33_2_pmu"; + regulator-always-on; +}; + +&mt6359_vcore_buck_reg { + regulator-name = "dvdd_proc_l"; + regulator-always-on; +}; + +&mt6359_vgpu11_buck_reg { + regulator-name = "dvdd_core"; + regulator-always-on; +}; + +&mt6359_vpa_buck_reg { + regulator-name = "vpa_pmu"; + regulator-max-microvolt = <3100000>; +}; + +&mt6359_vproc2_buck_reg { + /* The name "vgpu" is required by mtk-regulator-coupler */ + regulator-name = "vgpu"; + regulator-min-microvolt = <550000>; + regulator-max-microvolt = <800000>; + regulator-coupled-with = <&mt6359_vsram_others_ldo_reg>; + regulator-coupled-max-spread = <6250>; +}; + +&mt6359_vpu_buck_reg { + regulator-name = "dvdd_adsp"; + regulator-always-on; +}; + +&mt6359_vrf12_ldo_reg { + regulator-name = "va12_abb2_pmu"; + regulator-always-on; +}; + +&mt6359_vsim1_ldo_reg { + regulator-name = "vsim1_pmu"; + regulator-enable-ramp-delay = <480>; +}; + +&mt6359_vsram_others_ldo_reg { + /* The name "vsram_gpu" is required by mtk-regulator-coupler */ + regulator-name = "vsram_gpu"; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <800000>; + regulator-coupled-with = <&mt6359_vproc2_buck_reg>; + regulator-coupled-max-spread = <6250>; +}; + +&mt6359_vufs_ldo_reg { + regulator-name = "vufs18_pmu"; + regulator-always-on; +}; + +&mt6359codec { + mediatek,mic-type-0 = <1>; /* 
ACC */ + mediatek,mic-type-1 = <3>; /* DCC */ +}; + +&pcie { + pinctrl-names = "default"; + pinctrl-0 = <&pcie_pins_default>; + status = "okay"; +}; + +&pciephy { + status = "okay"; +}; + +&pio { + audio_default_pins: audio-default-pins { + pins-cmd-dat { + pinmux = <PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI>, + <PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI>, + <PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0>, + <PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1>, + <PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0>, + <PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1>, + <PINMUX_GPIO107__FUNC_B0_I2SIN_MCK>, + <PINMUX_GPIO108__FUNC_B0_I2SIN_BCK>, + <PINMUX_GPIO109__FUNC_B0_I2SIN_WS>, + <PINMUX_GPIO110__FUNC_I0_I2SIN_D0>, + <PINMUX_GPIO114__FUNC_O_I2SO2_MCK>, + <PINMUX_GPIO115__FUNC_B0_I2SO2_BCK>, + <PINMUX_GPIO116__FUNC_B0_I2SO2_WS>, + <PINMUX_GPIO117__FUNC_O_I2SO2_D0>, + <PINMUX_GPIO118__FUNC_O_I2SO2_D1>, + <PINMUX_GPIO121__FUNC_B0_PCM_CLK>, + <PINMUX_GPIO122__FUNC_B0_PCM_SYNC>, + <PINMUX_GPIO124__FUNC_I0_PCM_DI>, + <PINMUX_GPIO125__FUNC_O_DMIC1_CLK>, + <PINMUX_GPIO126__FUNC_I0_DMIC1_DAT>, + <PINMUX_GPIO128__FUNC_O_DMIC2_CLK>, + <PINMUX_GPIO129__FUNC_I0_DMIC2_DAT>; + }; + }; + + dptx_pins: dptx-pins { + pins-cmd-dat { + pinmux = <PINMUX_GPIO46__FUNC_I0_DP_TX_HPD>; + bias-pull-up; + }; + }; + + edp_panel_3v3_en_pins: edp-panel-3v3-en-pins { + pins1 { + pinmux = <PINMUX_GPIO15__FUNC_B_GPIO15>; + output-high; + }; + }; + + eth_default_pins: eth-default-pins { + pins-cc { + pinmux = <PINMUX_GPIO139__FUNC_B0_GBE_TXC>, + <PINMUX_GPIO140__FUNC_I0_GBE_RXC>, + <PINMUX_GPIO141__FUNC_I0_GBE_RXDV>, + <PINMUX_GPIO142__FUNC_O_GBE_TXEN>; + drive-strength = <8>; + }; + + pins-mdio { + pinmux = <PINMUX_GPIO143__FUNC_O_GBE_MDC>, + <PINMUX_GPIO144__FUNC_B1_GBE_MDIO>; + drive-strength = <8>; + input-enable; + }; + + pins-power { + pinmux = <PINMUX_GPIO145__FUNC_B_GPIO145>, + <PINMUX_GPIO146__FUNC_B_GPIO146>; + output-high; + }; + + pins-rxd { + pinmux = <PINMUX_GPIO135__FUNC_I0_GBE_RXD3>, + <PINMUX_GPIO136__FUNC_I0_GBE_RXD2>, + <PINMUX_GPIO137__FUNC_I0_GBE_RXD1>, + <PINMUX_GPIO138__FUNC_I0_GBE_RXD0>; + drive-strength = <8>; + }; + + pins-txd { + pinmux = <PINMUX_GPIO131__FUNC_O_GBE_TXD3>, + <PINMUX_GPIO132__FUNC_O_GBE_TXD2>, + <PINMUX_GPIO133__FUNC_O_GBE_TXD1>, + <PINMUX_GPIO134__FUNC_O_GBE_TXD0>; + drive-strength = <8>; + }; + }; + + eth_sleep_pins: eth-sleep-pins { + pins-cc { + pinmux = <PINMUX_GPIO139__FUNC_B_GPIO139>, + <PINMUX_GPIO140__FUNC_B_GPIO140>, + <PINMUX_GPIO141__FUNC_B_GPIO141>, + <PINMUX_GPIO142__FUNC_B_GPIO142>; + }; + + pins-mdio { + pinmux = <PINMUX_GPIO143__FUNC_B_GPIO143>, + <PINMUX_GPIO144__FUNC_B_GPIO144>; + input-disable; + bias-disable; + }; + + pins-rxd { + pinmux = <PINMUX_GPIO135__FUNC_B_GPIO135>, + <PINMUX_GPIO136__FUNC_B_GPIO136>, + <PINMUX_GPIO137__FUNC_B_GPIO137>, + <PINMUX_GPIO138__FUNC_B_GPIO138>; + }; + + pins-txd { + pinmux = <PINMUX_GPIO131__FUNC_B_GPIO131>, + <PINMUX_GPIO132__FUNC_B_GPIO132>, + <PINMUX_GPIO133__FUNC_B_GPIO133>, + <PINMUX_GPIO134__FUNC_B_GPIO134>; + }; + }; + + i2c0_pins: i2c0-pins { + pins { + pinmux = <PINMUX_GPIO56__FUNC_B1_SDA0>, + <PINMUX_GPIO55__FUNC_B1_SCL0>; + bias-pull-up = <MTK_PULL_SET_RSEL_011>; + drive-strength-microamp = <1000>; + }; + }; + + i2c1_pins: i2c1-pins { + pins { + pinmux = <PINMUX_GPIO58__FUNC_B1_SDA1>, + <PINMUX_GPIO57__FUNC_B1_SCL1>; + bias-pull-up = <MTK_PULL_SET_RSEL_011>; + drive-strength-microamp = <1000>; + }; + }; + + i2c2_pins: i2c2-pins { + pins { + pinmux = <PINMUX_GPIO60__FUNC_B1_SDA2>, + <PINMUX_GPIO59__FUNC_B1_SCL2>; + bias-pull-up = <MTK_PULL_SET_RSEL_011>; + 
drive-strength-microamp = <1000>; + }; + }; + + i2c3_pins: i2c3-pins { + pins { + pinmux = <PINMUX_GPIO62__FUNC_B1_SDA3>, + <PINMUX_GPIO61__FUNC_B1_SCL3>; + bias-pull-up = <MTK_PULL_SET_RSEL_011>; + drive-strength-microamp = <1000>; + }; + }; + + i2c4_pins: i2c4-pins { + pins { + pinmux = <PINMUX_GPIO64__FUNC_B1_SDA4>, + <PINMUX_GPIO63__FUNC_B1_SCL4>; + bias-pull-up = <MTK_PULL_SET_RSEL_011>; + drive-strength-microamp = <1000>; + }; + }; + + i2c5_pins: i2c5-pins { + pins { + pinmux = <PINMUX_GPIO66__FUNC_B1_SDA5>, + <PINMUX_GPIO65__FUNC_B1_SCL5>; + bias-pull-up = <MTK_PULL_SET_RSEL_011>; + drive-strength-microamp = <1000>; + }; + }; + + i2c6_pins: i2c6-pins { + pins { + pinmux = <PINMUX_GPIO68__FUNC_B1_SDA6>, + <PINMUX_GPIO67__FUNC_B1_SCL6>; + bias-pull-up = <MTK_PULL_SET_RSEL_011>; + drive-strength-microamp = <1000>; + }; + }; + + gpio_key_pins: gpio-key-pins { + pins { + pinmux = <PINMUX_GPIO42__FUNC_B1_KPCOL0>, + <PINMUX_GPIO43__FUNC_B1_KPCOL1>, + <PINMUX_GPIO44__FUNC_B1_KPROW0>; + }; + }; + + mmc0_default_pins: mmc0-default-pins { + pins-clk { + pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>; + drive-strength = <6>; + bias-pull-down = <MTK_PUPD_SET_R1R0_10>; + }; + + pins-cmd-dat { + pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>, + <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>, + <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>, + <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>, + <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>, + <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>, + <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>, + <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>, + <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>; + input-enable; + drive-strength = <6>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + + pins-rst { + pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>; + drive-strength = <6>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + }; + + mmc0_uhs_pins: mmc0-uhs-pins { + pins-clk { + pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>; + drive-strength = <8>; + bias-pull-down = <MTK_PUPD_SET_R1R0_10>; + }; + + pins-cmd-dat { + pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>, + <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>, + <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>, + <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>, + <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>, + <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>, + <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>, + <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>, + <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>; + input-enable; + drive-strength = <8>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + + pins-ds { + pinmux = <PINMUX_GPIO162__FUNC_B0_MSDC0_DSL>; + drive-strength = <8>; + bias-pull-down = <MTK_PUPD_SET_R1R0_10>; + }; + + pins-rst { + pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>; + drive-strength = <8>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + }; + + mmc1_default_pins: mmc1-default-pins { + pins-clk { + pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>; + drive-strength = <6>; + bias-pull-down = <MTK_PUPD_SET_R1R0_10>; + }; + + pins-cmd-dat { + pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>, + <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>, + <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>, + <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>, + <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>; + input-enable; + drive-strength = <6>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + + pins-insert { + pinmux = <PINMUX_GPIO2__FUNC_B_GPIO2>; + bias-pull-up; + }; + }; + + mmc1_uhs_pins: mmc1-uhs-pins { + pins-clk { + pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>; + drive-strength = <6>; + bias-pull-down = <MTK_PUPD_SET_R1R0_10>; + }; + + pins-cmd-dat { + pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>, + 
<PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>, + <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>, + <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>, + <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>; + input-enable; + drive-strength = <6>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + }; + + mmc2_default_pins: mmc2-default-pins { + pins-clk { + pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>; + drive-strength = <4>; + bias-pull-down = <MTK_PUPD_SET_R1R0_10>; + }; + + pins-cmd-dat { + pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>, + <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>, + <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>, + <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>, + <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>; + input-enable; + drive-strength = <6>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + + pins-pcm { + pinmux = <PINMUX_GPIO123__FUNC_O_PCM_DO>; + }; + }; + + mmc2_uhs_pins: mmc2-uhs-pins { + pins-clk { + pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>; + drive-strength = <4>; + bias-pull-down = <MTK_PUPD_SET_R1R0_10>; + }; + + pins-cmd-dat { + pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>, + <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>, + <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>, + <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>, + <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>; + input-enable; + drive-strength = <6>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + }; + + mmc2_eint_pins: mmc2-eint-pins { + pins-dat1 { + pinmux = <PINMUX_GPIO172__FUNC_B_GPIO172>; + input-enable; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + }; + + mmc2_dat1_pins: mmc2-dat1-pins { + pins-dat1 { + pinmux = <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>; + input-enable; + drive-strength = <6>; + bias-pull-up = <MTK_PUPD_SET_R1R0_01>; + }; + }; + + panel_default_pins: panel-default-pins { + pins-dcdc { + pinmux = <PINMUX_GPIO45__FUNC_B_GPIO45>; + output-low; + }; + + pins-en { + pinmux = <PINMUX_GPIO111__FUNC_B_GPIO111>; + output-low; + }; + + pins-rst { + pinmux = <PINMUX_GPIO25__FUNC_B_GPIO25>; + output-high; + }; + }; + + pcie_pins_default: pcie-default { + mux { + pinmux = <PINMUX_GPIO47__FUNC_I1_WAKEN>, + <PINMUX_GPIO48__FUNC_O_PERSTN>, + <PINMUX_GPIO49__FUNC_B1_CLKREQN>; + bias-pull-up; + }; + }; + + rt1715_int_pins: rt1715-int-pins { + pins_cmd0_dat { + pinmux = <PINMUX_GPIO12__FUNC_B_GPIO12>; + bias-pull-up; + input-enable; + }; + }; + + spi0_pins: spi0-pins { + pins-spi { + pinmux = <PINMUX_GPIO69__FUNC_O_SPIM0_CSB>, + <PINMUX_GPIO70__FUNC_O_SPIM0_CLK>, + <PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI>, + <PINMUX_GPIO72__FUNC_B0_SPIM0_MISO>; + bias-disable; + }; + }; + + spi1_pins: spi1-pins { + pins-spi { + pinmux = <PINMUX_GPIO75__FUNC_O_SPIM1_CSB>, + <PINMUX_GPIO76__FUNC_O_SPIM1_CLK>, + <PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI>, + <PINMUX_GPIO78__FUNC_B0_SPIM1_MISO>; + bias-disable; + }; + }; + + spi2_pins: spi2-pins { + pins-spi { + pinmux = <PINMUX_GPIO79__FUNC_O_SPIM2_CSB>, + <PINMUX_GPIO80__FUNC_O_SPIM2_CLK>, + <PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI>, + <PINMUX_GPIO82__FUNC_B0_SPIM2_MISO>; + bias-disable; + }; + }; + + touch_pins: touch-pins { + pins-irq { + pinmux = <PINMUX_GPIO6__FUNC_B_GPIO6>; + input-enable; + bias-disable; + }; + + pins-reset { + pinmux = <PINMUX_GPIO5__FUNC_B_GPIO5>; + output-high; + }; + }; + + uart0_pins: uart0-pins { + pins { + pinmux = <PINMUX_GPIO31__FUNC_O_UTXD0>, + <PINMUX_GPIO32__FUNC_I1_URXD0>; + bias-pull-up; + }; + }; + + uart1_pins: uart1-pins { + pins { + pinmux = <PINMUX_GPIO33__FUNC_O_UTXD1>, + <PINMUX_GPIO34__FUNC_I1_URXD1>; + bias-pull-up; + }; + }; + + uart2_pins: uart2-pins { + pins { + pinmux = <PINMUX_GPIO35__FUNC_O_UTXD2>, + <PINMUX_GPIO36__FUNC_I1_URXD2>; + bias-pull-up; + }; 
+ }; + + usb_default_pins: usb-default-pins { + pins-iddig { + pinmux = <PINMUX_GPIO83__FUNC_B_GPIO83>; + input-enable; + bias-pull-up; + }; + + pins-valid { + pinmux = <PINMUX_GPIO85__FUNC_I0_VBUSVALID>; + input-enable; + }; + + pins-vbus { + pinmux = <PINMUX_GPIO84__FUNC_O_USB_DRVVBUS>; + output-high; + }; + + }; + + usb1_default_pins: usb1-default-pins { + pins-valid { + pinmux = <PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P>; + input-enable; + }; + + pins-usb-hub-3v3-en { + pinmux = <PINMUX_GPIO112__FUNC_B_GPIO112>; + output-high; + }; + }; + + wifi_pwrseq_pins: wifi-pwrseq-pins { + pins-wifi-enable { + pinmux = <PINMUX_GPIO127__FUNC_B_GPIO127>; + output-low; + }; + }; +}; + +&eth { + phy-mode ="rgmii-id"; + phy-handle = <&ethernet_phy0>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&eth_default_pins>; + pinctrl-1 = <&eth_sleep_pins>; + mediatek,mac-wol; + snps,reset-gpio = <&pio 147 GPIO_ACTIVE_HIGH>; + snps,reset-delays-us = <0 10000 10000>; + status = "okay"; +}; + +&eth_mdio { + ethernet_phy0: ethernet-phy@1 { + compatible = "ethernet-phy-id001c.c916"; + reg = <0x1>; + }; +}; + +&pmic { + interrupt-parent = <&pio>; + interrupts = <222 IRQ_TYPE_LEVEL_HIGH>; + + mt6359keys: keys { + compatible = "mediatek,mt6359-keys"; + mediatek,long-press-mode = <1>; + power-off-time-sec = <0>; + + power-key { + linux,keycodes = <KEY_POWER>; + wakeup-source; + }; + }; +}; + +&scp { + memory-region = <&scp_mem>; + status = "okay"; +}; + +&sound { + compatible = "mediatek,mt8390-mt6359-evk", "mediatek,mt8188-mt6359-evb"; + model = "mt8390-evk"; + pinctrl-names = "default"; + pinctrl-0 = <&audio_default_pins>; + audio-routing = + "Headphone", "Headphone L", + "Headphone", "Headphone R"; + mediatek,adsp = <&adsp>; + status = "okay"; + + dai-link-0 { + link-name = "DL_SRC_BE"; + + codec { + sound-dai = <&pmic 0>; + }; + }; +}; + +&spi2 { + pinctrl-0 = <&spi2_pins>; + pinctrl-names = "default"; + mediatek,pad-select = <0>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; +}; + +&uart0 { + pinctrl-0 = <&uart0_pins>; + pinctrl-names = "default"; + status = "okay"; +}; + +&uart1 { + pinctrl-0 = <&uart1_pins>; + pinctrl-names = "default"; + status = "okay"; +}; + +&uart2 { + pinctrl-0 = <&uart2_pins>; + pinctrl-names = "default"; + status = "okay"; +}; + +&u3phy0 { + status = "okay"; +}; + +&u3phy1 { + status = "okay"; +}; + +&u3phy2 { + status = "okay"; +}; + +&xhci0 { + status = "okay"; + vusb33-supply = <&mt6359_vusb_ldo_reg>; +}; + +&xhci1 { + status = "okay"; + vusb33-supply = <&mt6359_vusb_ldo_reg>; + #address-cells = <1>; + #size-cells = <0>; + + hub_2_0: hub@1 { + compatible = "usb451,8025"; + reg = <1>; + peer-hub = <&hub_3_0>; + reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>; + vdd-supply = <&usb_hub_fixed_3v3>; + }; + + hub_3_0: hub@2 { + compatible = "usb451,8027"; + reg = <2>; + peer-hub = <&hub_2_0>; + reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>; + vdd-supply = <&usb_hub_fixed_3v3>; + }; +}; + +&xhci2 { + status = "okay"; + vusb33-supply = <&mt6359_vusb_ldo_reg>; + vbus-supply = <&sdio_fixed_3v3>; /* wifi_3v3 */ +}; diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi index 7655d5e3a034..522e20924e94 100644 --- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi @@ -47,16 +47,20 @@ can_clk: can { cluster1_opp: opp-table-1 { compatible = "operating-points-v2"; opp-shared; + opp-800000000 { opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1030000>; clock-latency-ns = <300000>; }; opp-1000000000 { opp-hz = /bits/ 64
<1000000000>; + opp-microvolt = <1030000>; clock-latency-ns = <300000>; }; opp-1200000000 { opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1030000>; clock-latency-ns = <300000>; opp-suspend; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi index 233af3081e84..50fbf7251665 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi @@ -47,16 +47,20 @@ can_clk: can { cluster1_opp: opp-table-1 { compatible = "operating-points-v2"; opp-shared; + opp-800000000 { opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1030000>; clock-latency-ns = <300000>; }; opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1030000>; clock-latency-ns = <300000>; }; opp-1200000000 { opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1030000>; clock-latency-ns = <300000>; opp-suspend; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts index 629121de5a13..5e7181948992 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts @@ -147,7 +147,7 @@ rtc: rtc@51 {
&pwm5 { status = "okay"; - pinctrl-names = "active"; + pinctrl-names = "default"; pinctrl-0 = <&pwm5_pin_pull_down>; };
diff --git a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts index a94114fb7cc1..96c27fc5005d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts +++ b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts @@ -274,13 +274,13 @@ otg_vbus_drv: otg-vbus-drv {
&pwm0 { pinctrl-0 = <&pwm0_pin_pull_up>; - pinctrl-names = "active"; + pinctrl-names = "default"; status = "okay"; };
&pwm1 { pinctrl-0 = <&pwm1_pin_pull_up>; - pinctrl-names = "active"; + pinctrl-names = "default"; status = "okay"; };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi index b169be06d4d1..c8eb5481f43d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi @@ -603,7 +603,7 @@ &pwm1 { };
&pwm2 { - pinctrl-names = "active"; + pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin_pull_down>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts index ac79140a9ecd..44cfdfeed668 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts @@ -778,20 +778,6 @@ &uart1 { pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>; uart-has-rtscts; status = "okay"; - - bluetooth { - compatible = "brcm,bcm43438-bt"; - clocks = <&rk809 1>; - clock-names = "lpo"; - device-wakeup-gpios = <&gpio4 RK_PB5 GPIO_ACTIVE_HIGH>; - host-wakeup-gpios = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>; - shutdown-gpios = <&gpio4 RK_PB2 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&bt_host_wake &bt_wake &bt_enable>; - vbat-supply = <&vcc3v3_sys>; - vddio-supply = <&vcc_1v8>; - /* vddio comes from regulator on module, use IO bank voltage instead */ - }; };
&uart2 { diff --git a/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi b/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi index e55390629114..8421d4b8c771 100644 --- a/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi @@ -174,6 +174,18 @@ psci { method = "smc"; };
+ reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + scmi_shmem: shmem@10f000 { + compatible = "arm,scmi-shmem"; + reg = <0x0 0x0010f000 0x0 0x100>; + no-map; + }; + }; + timer { compatible = "arm,armv8-timer"; interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>, @@ -199,19 +211,6 @@ xin32k: xin32k { #clock-cells = <0>; };
- sram@10f000 { - compatible = "mmio-sram"; - reg = <0x0 0x0010f000 0x0 0x100>; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0 0x0 0x0010f000 0x100>; - - scmi_shmem: sram@0 { - compatible = "arm,scmi-shmem"; - reg = <0x0 0x100>; - }; - }; - sata1: sata@fc400000 { compatible = "rockchip,rk3568-dwc-ahci", "snps,dwc-ahci"; reg = <0 0xfc400000 0 0x1000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts b/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts index 7c7331936a7f..a9b9db31d2a3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts +++ b/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts @@ -182,8 +182,7 @@ &gmac0 { &eth0m0_tx_bus2 &eth0m0_rx_bus2 &eth0m0_rgmii_clk - &eth0m0_rgmii_bus - &ethm0_clk0_25m_out>; + &eth0m0_rgmii_bus>;
phy-handle = <&rgmii_phy0>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi index 87090cb98020..bcf3cf704a00 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi @@ -73,7 +73,7 @@ &led_green_pwm {
/* phy2 */ &pcie2x1l1 { - reset-gpios = <&gpio4 RK_PD4 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio3 RK_PD4 GPIO_ACTIVE_HIGH>; vpcie3v3-supply = <&vcc3v3_pcie_eth>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts index 9c394f733bbf..b2c30122aacc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts @@ -429,7 +429,7 @@ &pwm2 { };
&pwm13 { - pinctrl-names = "active"; + pinctrl-names = "default"; pinctrl-0 = <&pwm13m2_pins>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi index 9202181fbd65..fcc4cb2e9389 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi @@ -28,10 +28,10 @@ sound { "Headphone Jack", "HPOUTR", "IN2L", "Line In Jack", "IN2R", "Line In Jack", - "Headphone Jack", "MICBIAS", - "IN1L", "Headphone Jack"; + "Microphone Jack", "MICBIAS", + "IN1L", "Microphone Jack"; simple-audio-card,widgets = - "Microphone", "Headphone Jack", + "Microphone", "Microphone Jack", "Headphone", "Headphone Jack", "Line", "Line In Jack";
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi index b33aff0d65c9..bd6a00d13aea 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi @@ -12,15 +12,7 @@ mcu_pmx0: pinctrl@4084000 { #pinctrl-cells = <1>; pinctrl-single,register-width = <32>; pinctrl-single,function-mask = <0xffffffff>; - pinctrl-single,gpio-range = - <&mcu_pmx_range 0 21 PIN_GPIO_RANGE_IOPAD>, - <&mcu_pmx_range 23 1 PIN_GPIO_RANGE_IOPAD>, - <&mcu_pmx_range 32 2 PIN_GPIO_RANGE_IOPAD>; bootph-all; - - mcu_pmx_range: gpio-range { - #pinctrl-single,gpio-range-cells = <3>; - }; };
mcu_esm: esm@4100000 { diff --git a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi index 420c77c8e9e5..6aea9d3f134e 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi @@ -42,17 +42,23 @@ &inta_main_dmss { ti,interrupt-ranges = <5 69 35>; };
-&main_pmx0 { - pinctrl-single,gpio-range = - <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 72 22 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>; +&main_conf { + audio_refclk0: clock-controller@82e0 { + compatible = "ti,am62-audio-refclk"; + reg = <0x82e0 0x4>; + clocks = <&k3_clks 157 0>; + assigned-clocks = <&k3_clks 157 0>; + assigned-clock-parents = <&k3_clks 157 16>; + #clock-cells = <0>; + };
- main_pmx0_range: gpio-range { - #pinctrl-single,gpio-range-cells = <3>; + audio_refclk1: clock-controller@82e4 { + compatible = "ti,am62-audio-refclk"; + reg = <0x82e4 0x4>; + clocks = <&k3_clks 157 18>; + assigned-clocks = <&k3_clks 157 18>; + assigned-clock-parents = <&k3_clks 157 34>; + #clock-cells = <0>; }; };
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts index d184e9c1a0a5..adee69607fdb 100644 --- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts +++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts @@ -590,7 +590,7 @@ exp1: gpio@23 { p05-hog { /* P05 - USB2.0_MUX_SEL */ gpio-hog; - gpios = <5 GPIO_ACTIVE_HIGH>; + gpios = <5 GPIO_ACTIVE_LOW>; output-high; };
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi index 3ac2d45a0558..6da7b3a2943c 100644 --- a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi @@ -251,21 +251,6 @@ &inta_main_dmss { ti,interrupt-ranges = <7 71 21>; };
-&main_pmx0 { - pinctrl-single,gpio-range = - <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 72 17 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 101 25 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>, - <&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>; - - main_pmx0_range: gpio-range { - #pinctrl-single,gpio-range-cells = <3>; - }; -}; - &main_gpio0 { gpio-ranges = <&main_pmx0 0 0 32>, <&main_pmx0 32 33 38>, <&main_pmx0 70 72 17>; diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h index f8f78f622dd2..a2a1eeb36d4b 100644 --- a/arch/arm64/include/asm/mem_encrypt.h +++ b/arch/arm64/include/asm/mem_encrypt.h @@ -21,4 +21,15 @@ static inline bool force_dma_unencrypted(struct device *dev) return is_realm_world(); }
+/* + * For Arm CCA guests, canonical addresses are "encrypted", so no changes + * required for dma_addr_encrypted(). + * The unencrypted DMA buffers must be accessed via the unprotected IPA, + * "top IPA bit" set. + */ +#define dma_addr_unencrypted(x) ((x) | PROT_NS_SHARED) + +/* Clear the "top" IPA bit while converting back */ +#define dma_addr_canonical(x) ((x) & ~PROT_NS_SHARED) + #endif /* __ASM_MEM_ENCRYPT_H */ diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c index deff21bfa680..b68e1d328d4c 100644 --- a/arch/arm64/kernel/compat_alignment.c +++ b/arch/arm64/kernel/compat_alignment.c @@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs) return 1; }
+ if (!handler) + return 1; type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 2b8bd27a852f..bdb989c49c09 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -382,8 +382,8 @@ config CMDLINE_BOOTLOADER config CMDLINE_EXTEND bool "Use built-in to extend bootloader kernel arguments" help - The command-line arguments provided during boot will be - appended to the built-in command line. This is useful in + The built-in command line will be appended to the command- + line arguments provided during boot. This is useful in cases where the provided arguments are insufficient and you don't want to or cannot modify them.
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h index 1b6d09617199..aa622c754414 100644 --- a/arch/loongarch/include/asm/cache.h +++ b/arch/loongarch/include/asm/cache.h @@ -8,6 +8,8 @@ #define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN (16) + #define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */ diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index a0ca84da8541..12bd15578c33 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -53,7 +53,7 @@ void spurious_interrupt(void); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
-#define MAX_IO_PICS 2 +#define MAX_IO_PICS 8 #define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group { diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h index f23adb15f418..fc8b64773794 100644 --- a/arch/loongarch/include/asm/stacktrace.h +++ b/arch/loongarch/include/asm/stacktrace.h @@ -8,6 +8,7 @@ #include <asm/asm.h> #include <asm/ptrace.h> #include <asm/loongarch.h> +#include <asm/unwind_hints.h> #include <linux/stringify.h>
enum stack_type { @@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i static __always_inline void prepare_frametrace(struct pt_regs *regs) { __asm__ __volatile__( + UNWIND_HINT_SAVE /* Save $ra */ STORE_ONE_REG(1) /* Use $ra to save PC */ @@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs) STORE_ONE_REG(29) STORE_ONE_REG(30) STORE_ONE_REG(31) + UNWIND_HINT_RESTORE : "=m" (regs->csr_era) : "r" (regs->regs) : "memory"); diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h index a01086ad9dde..2c68bc72736c 100644 --- a/arch/loongarch/include/asm/unwind_hints.h +++ b/arch/loongarch/include/asm/unwind_hints.h @@ -23,6 +23,14 @@ UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL .endm
-#endif /* __ASSEMBLY__ */ +#else /* !__ASSEMBLY__ */ + +#define UNWIND_HINT_SAVE \ + UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0) + +#define UNWIND_HINT_RESTORE \ + UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0) + +#endif /* !__ASSEMBLY__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */ diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 2f1f5b08638f..27144de5c5fe 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void) return -ENODEV;
clk = of_clk_get(np, 0); + of_node_put(np); + if (IS_ERR(clk)) return -ENODEV;
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c index 445c452d72a7..7be5b4c0c900 100644 --- a/arch/loongarch/kernel/kgdb.c +++ b/arch/loongarch/kernel/kgdb.c @@ -8,6 +8,7 @@ #include <linux/hw_breakpoint.h> #include <linux/kdebug.h> #include <linux/kgdb.h> +#include <linux/objtool.h> #include <linux/processor.h> #include <linux/ptrace.h> #include <linux/sched.h> @@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) regs->csr_era = pc; }
-void arch_kgdb_breakpoint(void) +noinline void arch_kgdb_breakpoint(void) { __asm__ __volatile__ ( \ ".globl kgdb_breakinst\n\t" \ - "nop\n" \ "kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */ } +STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/* * Calls linux_debug_hook before the kernel dies. If KGDB is enabled, diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index ea357a3edc09..fa1500d4aa3e 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx) */ if (seen_tail_call(ctx) && seen_call(ctx)) move_reg(ctx, TCC_SAVED, REG_TCC); + else + emit_insn(ctx, nop);
ctx->stack_size = stack_adjust; } @@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
move_addr(ctx, t1, func_addr); emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0); - move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + + if (insn->src_reg != BPF_PSEUDO_CALL) + move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + break;
/* tail call */ @@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext { const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
- move_imm(ctx, dst, imm64, is32); + if (bpf_pseudo_func(insn)) + move_addr(ctx, dst, imm64); + else + move_imm(ctx, dst, imm64, is32); return 1; }
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index 68586338ecf8..f9c569f53949 100644 --- a/arch/loongarch/net/bpf_jit.h +++ b/arch/loongarch/net/bpf_jit.h @@ -27,6 +27,11 @@ struct jit_data { struct jit_ctx ctx; };
+static inline void emit_nop(union loongarch_instruction *insn) +{ + insn->word = INSN_NOP; +} + #define emit_insn(ctx, func, ...) \ do { \ if (ctx->image != NULL) { \ diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index 8f2676c3a988..3c43c09d4489 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h @@ -95,10 +95,24 @@ static inline void set_fc(unsigned long val) "movec %0,%/dfc\n\t" : /* no outputs */ : "r" (val) : "memory"); } + +static inline unsigned long get_fc(void) +{ + unsigned long val; + + __asm__ ("movec %/dfc,%0" : "=r" (val) : ); + + return val; +} #else static inline void set_fc(unsigned long val) { } + +static inline unsigned long get_fc(void) +{ + return USER_DATA; +} #endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
struct thread_struct { diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index 119bd32efcfb..b39fc3717d8e 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -17,6 +17,7 @@ #include <linux/bitops.h> #include <linux/module.h> #include <linux/sched/mm.h> +#include <linux/string_choices.h>
#include <asm/setup.h> #include <asm/traps.h> @@ -370,8 +371,8 @@ int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault) }
#ifdef DEBUG_MMU_EMU - pr_info("%s: vaddr=%lx type=%s crp=%p\n", __func__, vaddr, - read_flag ? "read" : "write", crp); + pr_info("%s: vaddr=%lx type=%s crp=%px\n", __func__, vaddr, + str_read_write(read_flag), crp); #endif
segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF; @@ -417,7 +418,7 @@ int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault) pte_val (*pte) |= SUN3_PAGE_ACCESSED;
#ifdef DEBUG_MMU_EMU - pr_info("seg:%ld crp:%p ->", get_fs().seg, crp); + pr_info("seg:%ld crp:%px ->", get_fc(), crp); print_pte_vaddr (vaddr); pr_cont("\n"); #endif diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index aa9cd4b951fe..96831c988606 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -132,16 +132,16 @@ #define SO_PASSPIDFD 0x404A #define SO_PEERPIDFD 0x404B
-#define SO_DEVMEM_LINEAR 78 -#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR -#define SO_DEVMEM_DMABUF 79 -#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF -#define SO_DEVMEM_DONTNEED 80 - #define SCM_TS_OPT_ID 0x404C
#define SO_RCVPRIORITY 0x404D
+#define SO_DEVMEM_LINEAR 0x404E +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 0x404F +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 0x4050 + #if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 77306be62e9e..129355f87f80 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y CONFIG_DETECT_HUNG_TASK=y CONFIG_BDI_SWITCH=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_GENERIC_PTDUMP=y +CONFIG_PTDUMP_DEBUGFS=y diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 9b38f4a7bc15..2f00b22b0823 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -51,3 +51,4 @@ $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y +OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S index 104c9911f406..dd86e338307d 100644 --- a/arch/powerpc/kexec/relocate_32.S +++ b/arch/powerpc/kexec/relocate_32.S @@ -348,16 +348,13 @@ write_utlb: rlwinm r10, r24, 0, 22, 27
cmpwi r10, PPC47x_TLB0_4K - bne 0f li r10, 0x1000 /* r10 = 4k */ - ANNOTATE_INTRA_FUNCTION_CALL - bl 1f + beq 0f
-0: /* Defaults to 256M */ lis r10, 0x1000
- bcl 20,31,$+4 +0: bcl 20,31,$+4 1: mflr r4 addi r4, r4, (2f-1b) /* virtual address of 2f */
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 2b79171ee185..f4e03aaabb4c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {} static inline void power_pmu_bhrb_disable(struct perf_event *event) {} -static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {} +static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) +{ +} static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {} static void pmao_restore_workaround(bool ebb) { } #endif /* CONFIG_PPC32 */ @@ -444,7 +447,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event) /* Called from ctxsw to prevent one process's branch entries to * mingle with the other process's entries during context switch. */ -static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { if (!ppmu->bhrb_nr) return; diff --git a/arch/powerpc/perf/vpa-pmu.c b/arch/powerpc/perf/vpa-pmu.c index 6a5bfd2a13b5..840733468959 100644 --- a/arch/powerpc/perf/vpa-pmu.c +++ b/arch/powerpc/perf/vpa-pmu.c @@ -156,6 +156,7 @@ static void vpa_pmu_del(struct perf_event *event, int flags) }
static struct pmu vpa_pmu = { + .module = THIS_MODULE, .task_ctx_nr = perf_sw_context, .name = "vpa_pmu", .event_init = vpa_pmu_event_init, diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c index 827d338deaf4..2c2999de6bfa 100644 --- a/arch/powerpc/platforms/cell/spufs/gang.c +++ b/arch/powerpc/platforms/cell/spufs/gang.c @@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void) mutex_init(&gang->aff_mutex); INIT_LIST_HEAD(&gang->list); INIT_LIST_HEAD(&gang->aff_list_head); + gang->alive = 1;
out: return gang; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 70236d1df3d3..9f9e4b871627 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir, return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); - if (ret) + if (ret) { + dput(dentry); return ret; + } files++; } return 0; }
+static void unuse_gang(struct dentry *dir) +{ + struct inode *inode = dir->d_inode; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; + + if (gang) { + bool dead; + + inode_lock(inode); // exclusion with spufs_create_context() + dead = !--gang->alive; + inode_unlock(inode); + + if (dead) + simple_recursive_removal(dir, NULL); + } +} + static int spufs_dir_close(struct inode *inode, struct file *file) { struct inode *parent; @@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) inode_unlock(parent); WARN_ON(ret);
+ unuse_gang(dir->d_parent); return dcache_dir_close(inode, file); }
@@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, { int ret; int affinity; - struct spu_gang *gang; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry};
@@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) return -ENODEV;
- gang = NULL; + if (gang) { + if (!gang->alive) + return -ENOENT; + gang->alive++; + } + neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { - gang = SPUFS_I(inode)->i_gang; if (!gang) return -EINVAL; mutex_lock(&gang->aff_mutex); @@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, }
ret = spufs_mkdir(inode, dentry, flags, mode & 0777); - if (ret) + if (ret) { + if (neighbor) + put_spu_context(neighbor); goto out_aff_unlock; + }
if (affinity) { spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, @@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); + if (ret && gang) + gang->alive--; // can't reach 0 return ret; }
@@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode); + dget(dentry); inc_nlink(dir); inc_nlink(d_inode(dentry)); return ret; @@ -492,6 +522,21 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) return ret; }
+static int spufs_gang_close(struct inode *inode, struct file *file) +{ + unuse_gang(file->f_path.dentry); + return dcache_dir_close(inode, file); +} + +static const struct file_operations spufs_gang_fops = { + .open = dcache_dir_open, + .release = spufs_gang_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .iterate_shared = dcache_readdir, + .fsync = noop_fsync, +}; + static int spufs_gang_open(const struct path *path) { int ret; @@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path) return PTR_ERR(filp); }
- filp->f_op = &simple_dir_operations; + filp->f_op = &spufs_gang_fops; fd_install(ret, filp); return ret; } @@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode, ret = spufs_mkgang(inode, dentry, mode & 0777); if (!ret) { ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); - } + if (ret < 0) + unuse_gang(dentry); } return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 84958487f696..d33787c57c39 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -151,6 +151,8 @@ struct spu_gang { int aff_flags; struct spu *aff_ref_spu; atomic_t aff_sched_count; + + int alive; };
/* Flag bits for spu_gang aff_flags */ diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 7612c52e9b1e..5d63abc499ce 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -149,7 +149,7 @@ config RISCV select HAVE_DYNAMIC_FTRACE_WITH_ARGS if HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_GRAPH_FUNC select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL - select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS select HAVE_FUNCTION_GRAPH_FREGS select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION select HAVE_EBPF_JIT if MMU diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile index f0da9d7b39c3..bc6c77ba837d 100644 --- a/arch/riscv/errata/Makefile +++ b/arch/riscv/errata/Makefile @@ -1,5 +1,9 @@ ifdef CONFIG_RELOCATABLE -KBUILD_CFLAGS += -fno-pie +# We can't use PIC/PIE when handling early-boot errata parsing, as the kernel +# doesn't have a GOT setup at that point. So instead just use medany: it's +# usually position-independent, so it should be good enough for the errata +# handling. +KBUILD_CFLAGS += -fno-pie -mcmodel=medany endif
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 569140d6e639..19defdc2002d 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -63,7 +63,7 @@ void __init riscv_user_isa_enable(void); #define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
-bool check_unaligned_access_emulated_all_cpus(void); +bool __init check_unaligned_access_emulated_all_cpus(void); #if defined(CONFIG_RISCV_SCALAR_MISALIGNED) void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); @@ -76,7 +76,7 @@ static inline bool unaligned_ctl_available(void) } #endif
-bool check_vector_unaligned_access_emulated_all_cpus(void); +bool __init check_vector_unaligned_access_emulated_all_cpus(void); #if defined(CONFIG_RISCV_VECTOR_MISALIGNED) void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused); DECLARE_PER_CPU(long, vector_misaligned_access); diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index c4721ce44ca4..2636ee00ccf0 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -92,7 +92,7 @@ struct dyn_arch_ftrace { #define make_call_t0(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_t0(offset); \ call[1] = to_jalr_t0(offset); \ } while (0) @@ -108,7 +108,7 @@ do { \ #define make_call_ra(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_ra(offset); \ call[1] = to_jalr_ra(offset); \ } while (0) diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index 3c37661801f9..e783a72d051f 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -468,6 +468,9 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, case R_RISCV_ALIGN: case R_RISCV_RELAX: break; + case R_RISCV_64: + *(u64 *)loc = val; + break; default: pr_err("Unknown rela relocation: %d\n", r_type); return -ENOEXEC; diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index 068168046e0e..da4a4000e57e 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -12,8 +12,6 @@ #include <asm/asm-offsets.h> #include <asm/ftrace.h>
-#define ABI_SIZE_ON_STACK 80 - .text
.macro SAVE_ABI_STATE @@ -28,12 +26,12 @@ * register if a0 was not saved. */ .macro SAVE_RET_ABI_STATE - addi sp, sp, -ABI_SIZE_ON_STACK - REG_S ra, 1*SZREG(sp) - REG_S s0, 8*SZREG(sp) - REG_S a0, 10*SZREG(sp) - REG_S a1, 11*SZREG(sp) - addi s0, sp, ABI_SIZE_ON_STACK + addi sp, sp, -FREGS_SIZE_ON_STACK + REG_S ra, FREGS_RA(sp) + REG_S s0, FREGS_S0(sp) + REG_S a0, FREGS_A0(sp) + REG_S a1, FREGS_A1(sp) + addi s0, sp, FREGS_SIZE_ON_STACK .endm
.macro RESTORE_ABI_STATE @@ -43,11 +41,11 @@ .endm
.macro RESTORE_RET_ABI_STATE - REG_L ra, 1*SZREG(sp) - REG_L s0, 8*SZREG(sp) - REG_L a0, 10*SZREG(sp) - REG_L a1, 11*SZREG(sp) - addi sp, sp, ABI_SIZE_ON_STACK + REG_L ra, FREGS_RA(sp) + REG_L s0, FREGS_S0(sp) + REG_L a0, FREGS_A0(sp) + REG_L a1, FREGS_A1(sp) + addi sp, sp, FREGS_SIZE_ON_STACK .endm
SYM_TYPED_FUNC_START(ftrace_stub) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 7cc108aed74e..4354c87c0376 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -605,16 +605,10 @@ void check_vector_unaligned_access_emulated(struct work_struct *work __always_un kernel_vector_end(); }
-bool check_vector_unaligned_access_emulated_all_cpus(void) +bool __init check_vector_unaligned_access_emulated_all_cpus(void) { int cpu;
- if (!has_vector()) { - for_each_online_cpu(cpu) - per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; - return false; - } - schedule_on_each_cpu(check_vector_unaligned_access_emulated);
for_each_online_cpu(cpu) @@ -625,7 +619,7 @@ bool check_vector_unaligned_access_emulated_all_cpus(void) return true; } #else -bool check_vector_unaligned_access_emulated_all_cpus(void) +bool __init check_vector_unaligned_access_emulated_all_cpus(void) { return false; } @@ -659,7 +653,7 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused) } }
-bool check_unaligned_access_emulated_all_cpus(void) +bool __init check_unaligned_access_emulated_all_cpus(void) { int cpu;
@@ -684,7 +678,7 @@ bool unaligned_ctl_available(void) return unaligned_ctl; } #else -bool check_unaligned_access_emulated_all_cpus(void) +bool __init check_unaligned_access_emulated_all_cpus(void) { return false; } diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index 91f189cf1611..a42115fbdeb8 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -121,7 +121,7 @@ static int check_unaligned_access(void *param) return 0; }
-static void check_unaligned_access_nonboot_cpu(void *param) +static void __init check_unaligned_access_nonboot_cpu(void *param) { unsigned int cpu = smp_processor_id(); struct page **pages = param; @@ -175,7 +175,7 @@ static void set_unaligned_access_static_branches(void) modify_unaligned_access_branches(&fast_and_online, num_online_cpus()); }
-static int lock_and_set_unaligned_access_static_branch(void) +static int __init lock_and_set_unaligned_access_static_branch(void) { cpus_read_lock(); set_unaligned_access_static_branches(); @@ -218,7 +218,7 @@ static int riscv_offline_cpu(unsigned int cpu) }
/* Measure unaligned access speed on all CPUs present at boot in parallel. */ -static int check_unaligned_access_speed_all_cpus(void) +static void __init check_unaligned_access_speed_all_cpus(void) { unsigned int cpu; unsigned int cpu_count = num_possible_cpus(); @@ -226,7 +226,7 @@ static int check_unaligned_access_speed_all_cpus(void)
if (!bufs) { pr_warn("Allocation failure, not measuring misaligned performance\n"); - return 0; + return; }
/* @@ -247,13 +247,6 @@ static int check_unaligned_access_speed_all_cpus(void) /* Check core 0. */ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
- /* - * Setup hotplug callbacks for any new CPUs that come online or go - * offline. - */ - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", - riscv_online_cpu, riscv_offline_cpu); - out: for_each_cpu(cpu, cpu_online_mask) { if (bufs[cpu]) @@ -261,12 +254,10 @@ static int check_unaligned_access_speed_all_cpus(void) }
kfree(bufs); - return 0; } #else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */ -static int check_unaligned_access_speed_all_cpus(void) +static void __init check_unaligned_access_speed_all_cpus(void) { - return 0; } #endif
@@ -349,7 +340,7 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n", cpu);
- return; + goto free; }
if (word_cycles < byte_cycles) @@ -363,57 +354,69 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus (speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? "fast" : "slow");
per_cpu(vector_misaligned_access, cpu) = speed; -} - -static int riscv_online_cpu_vec(unsigned int cpu) -{ - if (!has_vector()) - return 0;
- if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED) - return 0; - - check_vector_unaligned_access_emulated(NULL); - check_vector_unaligned_access(NULL); - return 0; +free: + __free_pages(page, MISALIGNED_BUFFER_ORDER); }
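
The probe above classifies a CPU as "fast" or "slow" by timing word-wise copies through misaligned pointers against byte-wise copies. A loose user-space analogue of that measurement; the in-kernel version uses the cycle counter and fixed page allocations, and the buffer size, loop count and clock used below are arbitrary choices:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define LEN   (64 * 1024)
#define LOOPS 4000

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Word-at-a-time copy through misaligned pointers; memcpy() of a long
 * typically compiles to a single unaligned load/store, which is the
 * access pattern being graded. */
static void copy_words(char *dst, const char *src, size_t len)
{
	for (size_t i = 0; i + sizeof(long) <= len; i += sizeof(long)) {
		long v;

		memcpy(&v, src + i, sizeof(v));
		memcpy(dst + i, &v, sizeof(v));
	}
}

static void copy_bytes(char *dst, const char *src, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dst[i] = src[i];
}

int main(void)
{
	static char a[LEN + 16], b[LEN + 16];
	uint64_t t0, words, bytes;
	int i;

	t0 = now_ns();
	for (i = 0; i < LOOPS; i++)
		copy_words(b + 1, a + 3, LEN);	/* misaligned dst and src */
	words = now_ns() - t0;

	t0 = now_ns();
	for (i = 0; i < LOOPS; i++)
		copy_bytes(b + 1, a + 3, LEN);
	bytes = now_ns() - t0;

	printf("words %llu ns, bytes %llu ns -> misaligned accesses look %s\n",
	       (unsigned long long)words, (unsigned long long)bytes,
	       words < bytes ? "fast" : "slow");
	return 0;
}
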
/* Measure unaligned access speed on all CPUs present at boot in parallel. */ -static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) +static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) { schedule_on_each_cpu(check_vector_unaligned_access);
- /* - * Setup hotplug callbacks for any new CPUs that come online or go - * offline. - */ - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", - riscv_online_cpu_vec, NULL); - return 0; } #else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */ -static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) +static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) { return 0; } #endif
-static int check_unaligned_access_all_cpus(void) +static int riscv_online_cpu_vec(unsigned int cpu) { - bool all_cpus_emulated, all_cpus_vec_unsupported; + if (!has_vector()) { + per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; + return 0; + } + +#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS + if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) + return 0;
- all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); - all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus(); + check_vector_unaligned_access_emulated(NULL); + check_vector_unaligned_access(NULL); +#endif + + return 0; +}
- if (!all_cpus_vec_unsupported && - IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) { +static int __init check_unaligned_access_all_cpus(void) +{ + int cpu; + + if (!check_unaligned_access_emulated_all_cpus()) + check_unaligned_access_speed_all_cpus(); + + if (!has_vector()) { + for_each_online_cpu(cpu) + per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; + } else if (!check_vector_unaligned_access_emulated_all_cpus() && + IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) { kthread_run(vec_check_unaligned_access_speed_all_cpus, NULL, "vec_check_unaligned_access_speed_all_cpus"); }
- if (!all_cpus_emulated) - return check_unaligned_access_speed_all_cpus(); + /* + * Setup hotplug callbacks for any new CPUs that come online or go + * offline. + */ +#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS + cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", + riscv_online_cpu, riscv_offline_cpu); +#endif + cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", + riscv_online_cpu_vec, NULL);
return 0; } diff --git a/arch/riscv/kernel/vec-copy-unaligned.S b/arch/riscv/kernel/vec-copy-unaligned.S index d16f19f1b3b6..7ce4de6f6e69 100644 --- a/arch/riscv/kernel/vec-copy-unaligned.S +++ b/arch/riscv/kernel/vec-copy-unaligned.S @@ -11,7 +11,7 @@
#define WORD_SEW CONCATENATE(e, WORD_EEW) #define VEC_L CONCATENATE(vle, WORD_EEW).v -#define VEC_S CONCATENATE(vle, WORD_EEW).v +#define VEC_S CONCATENATE(vse, WORD_EEW).v
/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */ /* Performs a memcpy without aligning buffers, using word loads and stores. */ diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 1fa8be5ee509..4b24705dc63a 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -172,8 +172,8 @@ module_init(riscv_kvm_init);
static void __exit riscv_kvm_exit(void) { - kvm_riscv_teardown(); - kvm_exit(); + + kvm_riscv_teardown(); } module_exit(riscv_kvm_exit); diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index 2707a51b082c..78ac3216a54d 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -666,6 +666,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba .type = etype, .size = sizeof(struct perf_event_attr), .pinned = true, + .disabled = true, /* * It should never reach here if the platform doesn't support the sscofpmf * extension as mode filtering won't work without it. diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index b4a78a4b35cf..375dd96bb4a0 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h) static pte_t get_clear_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - unsigned long pte_num) + unsigned long ncontig) { - pte_t orig_pte = ptep_get(ptep); - unsigned long i; - - for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) { - pte_t pte = ptep_get_and_clear(mm, addr, ptep); - - if (pte_dirty(pte)) - orig_pte = pte_mkdirty(orig_pte); - - if (pte_young(pte)) - orig_pte = pte_mkyoung(orig_pte); + pte_t pte, tmp_pte; + bool present; + + pte = ptep_get_and_clear(mm, addr, ptep); + present = pte_present(pte); + while (--ncontig) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = ptep_get_and_clear(mm, addr, ptep); + if (present) { + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } } - - return orig_pte; + return pte; }
static pte_t get_clear_contig_flush(struct mm_struct *mm, @@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm, flush_tlb_range(&vma, saddr, addr); }
+static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize) +{ + unsigned long hugepage_shift; + + if (sz >= PGDIR_SIZE) + hugepage_shift = PGDIR_SHIFT; + else if (sz >= P4D_SIZE) + hugepage_shift = P4D_SHIFT; + else if (sz >= PUD_SIZE) + hugepage_shift = PUD_SHIFT; + else if (sz >= PMD_SIZE) + hugepage_shift = PMD_SHIFT; + else + hugepage_shift = PAGE_SHIFT; + + *pgsize = 1 << hugepage_shift; + + return sz >> hugepage_shift; +} + /* * When dealing with NAPOT mappings, the privileged specification indicates that * "if an update needs to be made, the OS generally should first mark all of the @@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm, pte_t pte, unsigned long sz) { - unsigned long hugepage_shift, pgsize; + size_t pgsize; int i, pte_num;
- if (sz >= PGDIR_SIZE) - hugepage_shift = PGDIR_SHIFT; - else if (sz >= P4D_SIZE) - hugepage_shift = P4D_SHIFT; - else if (sz >= PUD_SIZE) - hugepage_shift = PUD_SHIFT; - else if (sz >= PMD_SIZE) - hugepage_shift = PMD_SHIFT; - else - hugepage_shift = PAGE_SHIFT; - - pte_num = sz >> hugepage_shift; - pgsize = 1 << hugepage_shift; + pte_num = num_contig_ptes_from_size(sz, &pgsize);
if (!pte_present(pte)) { for (i = 0; i < pte_num; i++, ptep++, addr += pgsize) @@ -295,13 +306,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) { + size_t pgsize; pte_t orig_pte = ptep_get(ptep); int pte_num;
if (!pte_napot(orig_pte)) return ptep_get_and_clear(mm, addr, ptep);
- pte_num = napot_pte_num(napot_cont_order(orig_pte)); + pte_num = num_contig_ptes_from_size(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pte_num); } @@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm, pte_t *ptep, unsigned long sz) { + size_t pgsize; pte_t pte = ptep_get(ptep); int i, pte_num;
@@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm, return; }
- pte_num = napot_pte_num(napot_cont_order(pte)); - for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) + pte_num = num_contig_ptes_from_size(sz, &pgsize); + + for (i = 0; i < pte_num; i++, addr += pgsize, ptep++) pte_clear(mm, addr, ptep); }
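
The new num_contig_ptes_from_size() helper derives the NAPOT granularity from the mapping size instead of re-reading a PTE that huge_ptep_get_and_clear() or huge_pte_clear() may already see as non-present. A simplified sketch of the same size-to-granularity mapping; Sv39-style constants are assumed and the P4D/PGDIR levels of the real helper are dropped:

#include <stdio.h>

/* Sv39-style constants, assumed purely for illustration. */
#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30

static int num_contig_ptes_from_size(unsigned long sz, unsigned long *pgsize)
{
	unsigned long shift;

	if (sz >= (1UL << PUD_SHIFT))
		shift = PUD_SHIFT;
	else if (sz >= (1UL << PMD_SHIFT))
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	*pgsize = 1UL << shift;
	return sz >> shift;
}

int main(void)
{
	unsigned long pgsize;
	/* A 64KiB NAPOT mapping is backed by 16 contiguous 4KiB PTEs. */
	int n = num_contig_ptes_from_size(64 * 1024, &pgsize);

	printf("64KiB -> %d entries of %lu bytes each\n", n, pgsize);
	return 0;
}
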
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S index 0e6ca6d5ae4b..c5db2f072c34 100644 --- a/arch/riscv/purgatory/entry.S +++ b/arch/riscv/purgatory/entry.S @@ -12,6 +12,7 @@
.text
+.align 2 SYM_CODE_START(purgatory_start)
lla sp, .Lstack diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index fc9933a743d6..251e0372ccbd 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -34,8 +34,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define ioremap_wc(addr, size) \ ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL))) -#define ioremap_wt(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL)))
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 3ca5af4cfe43..2467e521e1c0 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1402,9 +1402,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); #define pgprot_writecombine pgprot_writecombine pgprot_t pgprot_writecombine(pgprot_t prot);
-#define pgprot_writethrough pgprot_writethrough -pgprot_t pgprot_writethrough(pgprot_t prot); - #define PFN_PTE_SHIFT PAGE_SHIFT
/* diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 4cc3408c4dac..88e09a650d2d 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -467,7 +467,7 @@ SYM_CODE_START(mcck_int_handler) clgrjl %r9,%r14, 4f larl %r14,.Lsie_leave clgrjhe %r9,%r14, 4f - lg %r10,__LC_PCPU + lg %r10,__LC_PCPU(%r13) oi __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST 4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST SIEEXIT __SF_SIE_CONTROL(%r15),%r13 diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c index 10725f5a6f0f..63875270941b 100644 --- a/arch/s390/kernel/perf_pai_crypto.c +++ b/arch/s390/kernel/perf_pai_crypto.c @@ -518,7 +518,8 @@ static void paicrypt_have_samples(void) /* Called on schedule-in and schedule-out. No access to event structure, * but for sampling only event CRYPTO_ALL is allowed. */ -static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { /* We started with a clean page on event installation. So read out * results on schedule_out and if page was dirty, save old values. diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c index a8f0bad99cf0..fd14d5ebccbc 100644 --- a/arch/s390/kernel/perf_pai_ext.c +++ b/arch/s390/kernel/perf_pai_ext.c @@ -542,7 +542,8 @@ static void paiext_have_samples(void) /* Called on schedule-in and schedule-out. No access to event structure, * but for sampling only event NNPA_ALL is allowed. */ -static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { /* We started with a clean page on event installation. So read out * results on schedule_out and if page was dirty, save old values. diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index f05e62e037c2..a248764ad958 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -34,16 +34,6 @@ pgprot_t pgprot_writecombine(pgprot_t prot) } EXPORT_SYMBOL_GPL(pgprot_writecombine);
-pgprot_t pgprot_writethrough(pgprot_t prot) -{ - /* - * mio_wb_bit_mask may be set on a different CPU, but it is only set - * once at init and only read afterwards. - */ - return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask); -} -EXPORT_SYMBOL_GPL(pgprot_writethrough); - static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int nodat) { diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 5babad8c5f75..bc02767f0639 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -213,7 +213,6 @@ extern int os_protect_memory(void *addr, unsigned long len, extern int os_unmap_memory(void *addr, int len); extern int os_drop_memory(void *addr, int length); extern int can_drop_memory(void); -extern int os_mincore(void *addr, unsigned long len);
void os_set_pdeathsig(void);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index f8567b933ffa..4df1cd0d2017 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -17,7 +17,7 @@ extra-y := vmlinux.lds obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \ physmem.o process.o ptrace.o reboot.o sigio.o \ signal.o sysrq.o time.o tlb.o trap.o \ - um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/ + um_arch.o umid.o kmsg_dump.o capflags.o skas/ obj-y += load_file.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c deleted file mode 100644 index 8ccd56813f68..000000000000 --- a/arch/um/kernel/maccess.c +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013 Richard Weinberger richrd@nod.at - */ - -#include <linux/uaccess.h> -#include <linux/kernel.h> -#include <os.h> - -bool copy_from_kernel_nofault_allowed(const void *src, size_t size) -{ - void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE); - - if ((unsigned long)src < PAGE_SIZE || size <= 0) - return false; - if (os_mincore(psrc, size + src - psrc) <= 0) - return false; - return true; -} diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c index 9f086f939420..184566edeee9 100644 --- a/arch/um/os-Linux/process.c +++ b/arch/um/os-Linux/process.c @@ -142,57 +142,6 @@ int __init can_drop_memory(void) return ok; }
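
The deleted um/kernel/maccess.c, together with the os_mincore() helper removed just below, used the host's mincore() system call to decide whether copy_from_kernel_nofault() could safely touch an address. A minimal stand-alone illustration of that residency check:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	unsigned char vec;
	char *p;

	p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Same call the removed os_page_mincore() wrapped: bit 0 of vec
	 * tells whether the page is currently resident. */
	mincore(p, psz, &vec);
	printf("before touch: %s\n", (vec & 1) ? "resident" : "not resident");

	p[0] = 1;		/* fault the page in */
	mincore(p, psz, &vec);
	printf("after touch:  %s\n", (vec & 1) ? "resident" : "not resident");

	munmap(p, psz);
	return 0;
}
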
-static int os_page_mincore(void *addr) -{ - char vec[2]; - int ret; - - ret = mincore(addr, UM_KERN_PAGE_SIZE, vec); - if (ret < 0) { - if (errno == ENOMEM || errno == EINVAL) - return 0; - else - return -errno; - } - - return vec[0] & 1; -} - -int os_mincore(void *addr, unsigned long len) -{ - char *vec; - int ret, i; - - if (len <= UM_KERN_PAGE_SIZE) - return os_page_mincore(addr); - - vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); - if (!vec) - return -ENOMEM; - - ret = mincore(addr, UM_KERN_PAGE_SIZE, vec); - if (ret < 0) { - if (errno == ENOMEM || errno == EINVAL) - ret = 0; - else - ret = -errno; - - goto out; - } - - for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) { - if (!(vec[i] & 1)) { - ret = 0; - goto out; - } - } - - ret = 1; -out: - free(vec); - return ret; -} - void init_new_thread_signals(void) { set_handler(SIGSEGV); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0e27ebd7e36a..aaec6ebd6c4e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -232,7 +232,7 @@ config X86 select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64 select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS - select HAVE_EISA + select HAVE_EISA if X86_32 select HAVE_EXIT_THREAD select HAVE_GUP_FAST select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE @@ -902,6 +902,7 @@ config INTEL_TDX_GUEST depends on X86_64 && CPU_SUP_INTEL depends on X86_X2APIC depends on EFI_STUB + depends on PARAVIRT select ARCH_HAS_CC_PLATFORM select X86_MEM_ENCRYPT select X86_MCE diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 2a7279d80460..42e6a40876ea 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -368,7 +368,7 @@ config X86_HAVE_PAE
config X86_CMPXCHG64 def_bool y - depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 + depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 || MGEODEGX1 || MGEODE_LX
# this should be set for all -march=.. options where the compiler # generates cmov. diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index a46b1397ad01..c86cbd9cbba3 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um @@ -7,12 +7,13 @@ core-y += arch/x86/crypto/ # GCC versions < 11. See: # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652 # -ifeq ($(CONFIG_CC_IS_CLANG),y) -KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json +ifeq ($(call gcc-min-version, 110000)$(CONFIG_CC_IS_CLANG),y) +KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2 endif
+KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json + ifeq ($(CONFIG_X86_32),y) START := 0x8048000
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index 32809a06dab4..6aad910d119d 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -14,6 +14,7 @@ #include <asm/ia32.h> #include <asm/insn.h> #include <asm/insn-eval.h> +#include <asm/paravirt_types.h> #include <asm/pgtable.h> #include <asm/set_memory.h> #include <asm/traps.h> @@ -398,7 +399,7 @@ static int handle_halt(struct ve_info *ve) return ve_instr_len(ve); }
-void __cpuidle tdx_safe_halt(void) +void __cpuidle tdx_halt(void) { const bool irq_disabled = false;
@@ -409,6 +410,16 @@ void __cpuidle tdx_safe_halt(void) WARN_ONCE(1, "HLT instruction emulation failed\n"); }
+static void __cpuidle tdx_safe_halt(void) +{ + tdx_halt(); + /* + * "__cpuidle" section doesn't support instrumentation, so stick + * with raw_* variant that avoids tracing hooks. + */ + raw_local_irq_enable(); +} + static int read_msr(struct pt_regs *regs, struct ve_info *ve) { struct tdx_module_args args = { @@ -1109,6 +1120,19 @@ void __init tdx_early_init(void) x86_platform.guest.enc_kexec_begin = tdx_kexec_begin; x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;
+ /* + * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that + * will enable interrupts before HLT TDCALL invocation if executed + * in STI-shadow, possibly resulting in missed wakeup events. + * + * Modify all possible HLT execution paths to use TDX specific routines + * that directly execute TDCALL and toggle the interrupt state as + * needed after TDCALL completion. This also reduces HLT related #VEs + * in addition to having a reliable halt logic execution. + */ + pv_ops.irq.safe_halt = tdx_safe_halt; + pv_ops.irq.halt = tdx_halt; + /* * TDX intercepts the RDMSR to read the X2APIC ID in the parallel * bringup low level code. That raises #VE which cannot be handled diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index ea81770629ee..626a81c6015b 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -70,6 +70,8 @@ For 32-bit we have the following conventions - kernel is built with pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ + /* We just clobbered the return address - use the IRET frame for unwinding: */ + UNWIND_HINT_IRET_REGS offset=3*8 .else pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 14db5b85114c..3514bf2978ee 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -142,7 +142,7 @@ static __always_inline int syscall_32_enter(struct pt_regs *regs) #ifdef CONFIG_IA32_EMULATION bool __ia32_enabled __ro_after_init = !IS_ENABLED(CONFIG_IA32_EMULATION_DEFAULT_DISABLED);
-static int ia32_emulation_override_cmdline(char *arg) +static int __init ia32_emulation_override_cmdline(char *arg) { return kstrtobool(arg, &__ia32_enabled); } diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S index 872947c1004c..918606ff92a9 100644 --- a/arch/x86/entry/vdso/vdso-layout.lds.S +++ b/arch/x86/entry/vdso/vdso-layout.lds.S @@ -24,7 +24,7 @@ SECTIONS
timens_page = vvar_start + PAGE_SIZE;
- vclock_pages = vvar_start + VDSO_NR_VCLOCK_PAGES * PAGE_SIZE; + vclock_pages = VDSO_VCLOCK_PAGES_START(vvar_start); pvclock_page = vclock_pages + VDSO_PAGE_PVCLOCK_OFFSET * PAGE_SIZE; hvclock_page = vclock_pages + VDSO_PAGE_HVCLOCK_OFFSET * PAGE_SIZE;
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 39e6efc1a9ca..aa62949335ec 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -290,7 +290,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr) }
vma = _install_special_mapping(mm, - addr + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE, + VDSO_VCLOCK_PAGES_START(addr), VDSO_NR_VCLOCK_PAGES * PAGE_SIZE, VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP| VM_PFNMAP, diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c index 780acd3dff22..ec3427463382 100644 --- a/arch/x86/events/amd/brs.c +++ b/arch/x86/events/amd/brs.c @@ -381,7 +381,8 @@ static void amd_brs_poison_buffer(void) * On ctxswin, sched_in = true, called after the PMU has started * On ctxswout, sched_in = false, called before the PMU is stopped */ -void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c index 19c7b76e21bc..c06ccca96851 100644 --- a/arch/x86/events/amd/lbr.c +++ b/arch/x86/events/amd/lbr.c @@ -371,7 +371,8 @@ void amd_pmu_lbr_del(struct perf_event *event) perf_sched_cb_dec(event->pmu); }
-void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 2092d615333d..3a27c50080f4 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2625,9 +2625,10 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { NULL, };
-static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { - static_call_cond(x86_pmu_sched_task)(pmu_ctx, sched_in); + static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in); }
static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc, diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index cdb19e3ba3aa..9e8de416d1f0 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2779,28 +2779,33 @@ static u64 icl_update_topdown_event(struct perf_event *event)
DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
-static void intel_pmu_read_topdown_event(struct perf_event *event) +static void intel_pmu_read_event(struct perf_event *event) { - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + bool pmu_enabled = cpuc->enabled;
- /* Only need to call update_topdown_event() once for group read. */ - if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && - !is_slots_event(event)) - return; + /* Only need to call update_topdown_event() once for group read. */ + if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ)) + return;
- perf_pmu_disable(event->pmu); - static_call(intel_pmu_update_topdown_event)(event); - perf_pmu_enable(event->pmu); -} + cpuc->enabled = 0; + if (pmu_enabled) + intel_pmu_disable_all();
-static void intel_pmu_read_event(struct perf_event *event) -{ - if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) - intel_pmu_auto_reload_read(event); - else if (is_topdown_count(event)) - intel_pmu_read_topdown_event(event); - else - x86_perf_event_update(event); + if (is_topdown_event(event)) + static_call(intel_pmu_update_topdown_event)(event); + else + intel_pmu_drain_pebs_buffer(); + + cpuc->enabled = pmu_enabled; + if (pmu_enabled) + intel_pmu_enable_all(0); + + return; + } + + x86_perf_event_update(event); }
static void intel_pmu_enable_fixed(struct perf_event *event) @@ -3070,7 +3075,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
handled++; x86_pmu_handle_guest_pebs(regs, &data); - x86_pmu.drain_pebs(regs, &data); + static_call(x86_pmu_drain_pebs)(regs, &data); status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/* @@ -5244,10 +5249,10 @@ static void intel_pmu_cpu_dead(int cpu) }
static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, - bool sched_in) + struct task_struct *task, bool sched_in) { intel_pmu_pebs_sched_task(pmu_ctx, sched_in); - intel_pmu_lbr_sched_task(pmu_ctx, sched_in); + intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in); }
static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc, diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index f122882ef278..33f4bb22fc0e 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -953,11 +953,11 @@ int intel_pmu_drain_bts_buffer(void) return 1; }
-static inline void intel_pmu_drain_pebs_buffer(void) +void intel_pmu_drain_pebs_buffer(void) { struct perf_sample_data data;
- x86_pmu.drain_pebs(NULL, &data); + static_call(x86_pmu_drain_pebs)(NULL, &data); }
/* @@ -2094,15 +2094,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) return NULL; }
-void intel_pmu_auto_reload_read(struct perf_event *event) -{ - WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)); - - perf_pmu_disable(event->pmu); - intel_pmu_drain_pebs_buffer(); - perf_pmu_enable(event->pmu); -} - /* * Special variant of intel_pmu_save_and_restart() for auto-reload. */ diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index dc641b50814e..24719adbcd7e 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -422,11 +422,17 @@ static __always_inline bool lbr_is_reset_in_cstate(void *ctx) return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL); }
+static inline bool has_lbr_callstack_users(void *ctx) +{ + return task_context_opt(ctx)->lbr_callstack_users || + x86_pmu.lbr_callstack_users; +} + static void __intel_pmu_lbr_restore(void *ctx) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (task_context_opt(ctx)->lbr_callstack_users == 0 || + if (!has_lbr_callstack_users(ctx) || task_context_opt(ctx)->lbr_stack_state == LBR_NONE) { intel_pmu_lbr_reset(); return; @@ -503,7 +509,7 @@ static void __intel_pmu_lbr_save(void *ctx) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (task_context_opt(ctx)->lbr_callstack_users == 0) { + if (!has_lbr_callstack_users(ctx)) { task_context_opt(ctx)->lbr_stack_state = LBR_NONE; return; } @@ -539,9 +545,11 @@ void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, task_context_opt(next_ctx_data)->lbr_callstack_users); }
-void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_ctx_data *ctx_data; void *task_ctx;
if (!cpuc->lbr_users) @@ -552,14 +560,18 @@ void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched * the task was scheduled out, restore the stack. Otherwise flush * the LBR stack. */ - task_ctx = pmu_ctx ? pmu_ctx->task_ctx_data : NULL; + rcu_read_lock(); + ctx_data = rcu_dereference(task->perf_ctx_data); + task_ctx = ctx_data ? ctx_data->data : NULL; if (task_ctx) { if (sched_in) __intel_pmu_lbr_restore(task_ctx); else __intel_pmu_lbr_save(task_ctx); + rcu_read_unlock(); return; } + rcu_read_unlock();
/* * Since a context switch can flip the address space and LBR entries @@ -588,9 +600,19 @@ void intel_pmu_lbr_add(struct perf_event *event)
cpuc->br_sel = event->hw.branch_reg.reg;
- if (branch_user_callstack(cpuc->br_sel) && event->pmu_ctx->task_ctx_data) - task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users++; + if (branch_user_callstack(cpuc->br_sel)) { + if (event->attach_state & PERF_ATTACH_TASK) { + struct task_struct *task = event->hw.target; + struct perf_ctx_data *ctx_data;
+ rcu_read_lock(); + ctx_data = rcu_dereference(task->perf_ctx_data); + if (ctx_data) + task_context_opt(ctx_data->data)->lbr_callstack_users++; + rcu_read_unlock(); + } else + x86_pmu.lbr_callstack_users++; + } /* * Request pmu::sched_task() callback, which will fire inside the * regular perf event scheduling, so that call will: @@ -664,9 +686,19 @@ void intel_pmu_lbr_del(struct perf_event *event) if (!x86_pmu.lbr_nr) return;
- if (branch_user_callstack(cpuc->br_sel) && - event->pmu_ctx->task_ctx_data) - task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users--; + if (branch_user_callstack(cpuc->br_sel)) { + if (event->attach_state & PERF_ATTACH_TASK) { + struct task_struct *task = event->hw.target; + struct perf_ctx_data *ctx_data; + + rcu_read_lock(); + ctx_data = rcu_dereference(task->perf_ctx_data); + if (ctx_data) + task_context_opt(ctx_data->data)->lbr_callstack_users--; + rcu_read_unlock(); + } else + x86_pmu.lbr_callstack_users--; + }
if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT) cpuc->lbr_select = 0; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 31c2771545a6..1dfa78a30266 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -869,7 +869,7 @@ struct x86_pmu {
void (*check_microcode)(void); void (*sched_task)(struct perf_event_pmu_context *pmu_ctx, - bool sched_in); + struct task_struct *task, bool sched_in);
/* * Intel Arch Perfmon v2+ @@ -914,6 +914,7 @@ struct x86_pmu { const int *lbr_sel_map; /* lbr_select mappings */ int *lbr_ctl_map; /* LBR_CTL mappings */ }; + u64 lbr_callstack_users; /* lbr callstack system wide users */ bool lbr_double_abort; /* duplicated lbr aborts */ bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */
@@ -1107,6 +1108,7 @@ extern struct x86_pmu x86_pmu __read_mostly;
DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period); DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update); +DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) { @@ -1394,7 +1396,8 @@ void amd_pmu_lbr_reset(void); void amd_pmu_lbr_read(void); void amd_pmu_lbr_add(struct perf_event *event); void amd_pmu_lbr_del(struct perf_event *event); -void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); +void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in); void amd_pmu_lbr_enable_all(void); void amd_pmu_lbr_disable_all(void); int amd_pmu_lbr_hw_config(struct perf_event *event); @@ -1448,7 +1451,8 @@ static inline void amd_pmu_brs_del(struct perf_event *event) perf_sched_cb_dec(event->pmu); }
-void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); +void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in); #else static inline int amd_brs_init(void) { @@ -1473,7 +1477,8 @@ static inline void amd_pmu_brs_del(struct perf_event *event) { }
-static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { }
@@ -1643,7 +1648,7 @@ void intel_pmu_pebs_disable_all(void);
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
-void intel_pmu_auto_reload_read(struct perf_event *event); +void intel_pmu_drain_pebs_buffer(void);
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
@@ -1656,7 +1661,8 @@ void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, struct perf_event_pmu_context *next_epc);
-void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); +void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in);
u64 lbr_from_signext_quirk_wr(u64 val);
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index ec7880271cf9..77bf05f06b9e 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -338,7 +338,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) vmsa->sev_features = sev_status >> 2;
ret = snp_set_vmsa(vmsa, true); - if (!ret) { + if (ret) { pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret); free_page((u64)vmsa); return ret; diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index cf7fc2b8e3ce..1c2db11a2c3c 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -76,6 +76,28 @@ static __always_inline void native_local_irq_restore(unsigned long flags)
#endif
+#ifndef CONFIG_PARAVIRT +#ifndef __ASSEMBLY__ +/* + * Used in the idle loop; sti takes one instruction cycle + * to complete: + */ +static __always_inline void arch_safe_halt(void) +{ + native_safe_halt(); +} + +/* + * Used when interrupts are already enabled or to + * shutdown the processor: + */ +static __always_inline void halt(void) +{ + native_halt(); +} +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_PARAVIRT */ + #ifdef CONFIG_PARAVIRT_XXL #include <asm/paravirt.h> #else @@ -97,24 +119,6 @@ static __always_inline void arch_local_irq_enable(void) native_irq_enable(); }
-/* - * Used in the idle loop; sti takes one instruction cycle - * to complete: - */ -static __always_inline void arch_safe_halt(void) -{ - native_safe_halt(); -} - -/* - * Used when interrupts are already enabled or to - * shutdown the processor: - */ -static __always_inline void halt(void) -{ - native_halt(); -} - /* * For spinlocks, etc: */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 041aff51eb50..29e7331a0c98 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -107,6 +107,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn, PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc); }
+static __always_inline void arch_safe_halt(void) +{ + PVOP_VCALL0(irq.safe_halt); +} + +static inline void halt(void) +{ + PVOP_VCALL0(irq.halt); +} + #ifdef CONFIG_PARAVIRT_XXL static inline void load_sp0(unsigned long sp0) { @@ -170,16 +180,6 @@ static inline void __write_cr4(unsigned long x) PVOP_VCALL1(cpu.write_cr4, x); }
-static __always_inline void arch_safe_halt(void) -{ - PVOP_VCALL0(irq.safe_halt); -} - -static inline void halt(void) -{ - PVOP_VCALL0(irq.halt); -} - static inline u64 paravirt_read_msr(unsigned msr) { return PVOP_CALL1(u64, cpu.read_msr, msr); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index fea56b04f436..abccfccc2e3f 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -120,10 +120,9 @@ struct pv_irq_ops { struct paravirt_callee_save save_fl; struct paravirt_callee_save irq_disable; struct paravirt_callee_save irq_enable; - +#endif void (*safe_halt)(void); void (*halt)(void); -#endif } __no_randomize_layout;
struct pv_mmu_ops { diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h index b4b16dafd55e..40f9a97371a9 100644 --- a/arch/x86/include/asm/tdx.h +++ b/arch/x86/include/asm/tdx.h @@ -58,7 +58,7 @@ void tdx_get_ve_info(struct ve_info *ve);
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
-void tdx_safe_halt(void); +void tdx_halt(void);
bool tdx_early_handle_ve(struct pt_regs *regs);
@@ -72,7 +72,7 @@ void __init tdx_dump_td_ctls(u64 td_ctls); #else
static inline void tdx_early_init(void) { }; -static inline void tdx_safe_halt(void) { }; +static inline void tdx_halt(void) { };
static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 02fc2aa06e9e..3da645139748 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -242,7 +242,7 @@ void flush_tlb_multi(const struct cpumask *cpumask, flush_tlb_mm_range((vma)->vm_mm, start, end, \ ((vma)->vm_flags & VM_HUGETLB) \ ? huge_page_shift(hstate_vma(vma)) \ - : PAGE_SHIFT, false) + : PAGE_SHIFT, true)
extern void flush_tlb_all(void); extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h index 37b4a70559a8..88b31d4cdfaf 100644 --- a/arch/x86/include/asm/vdso/vsyscall.h +++ b/arch/x86/include/asm/vdso/vsyscall.h @@ -6,6 +6,7 @@ #define __VVAR_PAGES 4
#define VDSO_NR_VCLOCK_PAGES 2 +#define VDSO_VCLOCK_PAGES_START(_b) ((_b) + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE) #define VDSO_PAGE_PVCLOCK_OFFSET 0 #define VDSO_PAGE_HVCLOCK_OFFSET 1
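
The new VDSO_VCLOCK_PAGES_START() macro gives vdso-layout.lds.S and vma.c a single definition of where the vclock pages sit inside the vvar area, rather than two open-coded copies of the same arithmetic. A small sketch of the layout it computes; the base address below is an arbitrary example and PAGE_SIZE is assumed to be 4KiB:

#include <stdio.h>

#define PAGE_SIZE            4096UL	/* x86 page size */
#define __VVAR_PAGES         4
#define VDSO_NR_VCLOCK_PAGES 2
#define VDSO_VCLOCK_PAGES_START(_b) \
	((_b) + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE)

int main(void)
{
	unsigned long vvar_start = 0x7fffb000UL;	/* example base */
	unsigned long vclock = VDSO_VCLOCK_PAGES_START(vvar_start);

	/* The linker script and map_vdso() now share this one definition. */
	printf("vvar area starts at   %#lx\n", vvar_start);
	printf("vclock pages start at %#lx\n", vclock);
	printf("pvclock page:         %#lx\n", vclock + 0 * PAGE_SIZE);
	printf("hvclock page:         %#lx\n", vclock + 1 * PAGE_SIZE);
	return 0;
}
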
diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c index 6cba85c79d42..97222efb4d2a 100644 --- a/arch/x86/kernel/cpu/bus_lock.c +++ b/arch/x86/kernel/cpu/bus_lock.c @@ -192,7 +192,13 @@ static void __split_lock_reenable(struct work_struct *work) { sld_update_msr(true); } -static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable); +/* + * In order for each CPU to schedule its delayed work independently of the + * others, delayed work struct must be per-CPU. This is not required when + * sysctl_sld_mitigate is enabled because of the semaphore that limits + * the number of simultaneously scheduled delayed works to 1. + */ +static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
/* * If a CPU goes offline with pending delayed work to re-enable split lock @@ -213,7 +219,7 @@ static int splitlock_cpu_offline(unsigned int cpu)
static void split_lock_warn(unsigned long ip) { - struct delayed_work *work; + struct delayed_work *work = NULL; int cpu;
if (!current->reported_split_lock) @@ -235,11 +241,17 @@ static void split_lock_warn(unsigned long ip) if (down_interruptible(&buslock_sem) == -EINTR) return; work = &sl_reenable_unlock; - } else { - work = &sl_reenable; }
cpu = get_cpu(); + + if (!work) { + work = this_cpu_ptr(&sl_reenable); + /* Deferred initialization of per-CPU struct */ + if (!work->work.func) + INIT_DELAYED_WORK(work, __split_lock_reenable); + } + schedule_delayed_work_on(cpu, work, 2);
/* Disable split lock detection on this CPU to make progress */ diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index dac4d64dfb2a..2235a7477436 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -300,13 +300,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs) copy_user = is_copy_from_user(regs); instrumentation_end();
- switch (fixup_type) { - case EX_TYPE_UACCESS: - if (!copy_user) - return IN_KERNEL; - m->kflags |= MCE_IN_KERNEL_COPYIN; - fallthrough; + if (copy_user) { + m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV; + return IN_KERNEL_RECOV; + }
+ switch (fixup_type) { case EX_TYPE_FAULT_MCE_SAFE: case EX_TYPE_DEFAULT_MCE_SAFE: m->kflags |= MCE_IN_KERNEL_RECOV; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 138689b8e1d8..b61028cf5c8a 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -600,7 +600,7 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev, unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize)) - return -1; + return false;
native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 6419e04d8a7b..04b653d613e8 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -157,7 +157,8 @@ static int closid_alloc(void)
lockdep_assert_held(&rdtgroup_mutex);
- if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && + is_llc_occupancy_enabled()) { cleanest_closid = resctrl_find_cleanest_closid(); if (cleanest_closid < 0) return cleanest_closid; diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index a7d562697e50..b2b118a8c09b 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -195,6 +195,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack); + stack = stack ?: get_stack_pointer(task, regs); regs = unwind_get_entry_regs(&state, &partial);
/* @@ -213,9 +214,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - hardirq stack * - entry stack */ - for (stack = stack ?: get_stack_pointer(task, regs); - stack; - stack = stack_info.next_sp) { + for (; stack; stack = stack_info.next_sp) { const char *stack_name;
stack = PTR_ALIGN(stack, sizeof(long)); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 1209c7aebb21..dcac3c058fb7 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -220,7 +220,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) struct fpstate *fpstate; unsigned int size;
- size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); + size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); fpstate = vzalloc(size); if (!fpstate) return false; @@ -232,8 +232,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) fpstate->is_guest = true;
gfpu->fpstate = fpstate; - gfpu->xfeatures = fpu_user_cfg.default_features; - gfpu->perm = fpu_user_cfg.default_features; + gfpu->xfeatures = fpu_kernel_cfg.default_features; + gfpu->perm = fpu_kernel_cfg.default_features;
/* * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1ccaa3397a67..c5bb980b8a67 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -110,6 +110,11 @@ int paravirt_disable_iospace(void) return request_resource(&ioport_resource, &reserve_ioports); }
+static noinstr void pv_native_safe_halt(void) +{ + native_safe_halt(); +} + #ifdef CONFIG_PARAVIRT_XXL static noinstr void pv_native_write_cr2(unsigned long val) { @@ -125,11 +130,6 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val) { native_set_debugreg(regno, val); } - -static noinstr void pv_native_safe_halt(void) -{ - native_safe_halt(); -} #endif
struct pv_info pv_info = { @@ -186,9 +186,11 @@ struct paravirt_patch_template pv_ops = { .irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl), .irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable), .irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable), +#endif /* CONFIG_PARAVIRT_XXL */ + + /* Irq HLT ops. */ .irq.safe_halt = pv_native_safe_halt, .irq.halt = native_halt, -#endif /* CONFIG_PARAVIRT_XXL */
/* Mmu ops. */ .mmu.flush_tlb_user = native_flush_tlb_local, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6da6769d7254..21561262a821 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -93,7 +93,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - memcpy(dst, src, arch_task_struct_size); + /* init_task is not dynamically sized (incomplete FPU state) */ + if (unlikely(src == &init_task)) + memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0); + else + memcpy(dst, src, arch_task_struct_size); + #ifdef CONFIG_VM86 dst->thread.vm86 = NULL; #endif @@ -934,7 +939,7 @@ void __init select_idle_routine(void) static_call_update(x86_idle, mwait_idle); } else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) { pr_info("using TDX aware idle routine\n"); - static_call_update(x86_idle, tdx_safe_halt); + static_call_update(x86_idle, tdx_halt); } else { static_call_update(x86_idle, default_idle); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 2dbadf347b5f..5e3e036e6e53 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -379,6 +379,21 @@ __visible void __noreturn handle_stack_overflow(struct pt_regs *regs, } #endif
+/* + * Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64 + * version of exc_double_fault() as noreturn. Otherwise the noreturn mismatch + * between configs triggers objtool warnings. + * + * This is a temporary hack until we have compiler or plugin support for + * annotating noreturns. + */ +#ifdef CONFIG_X86_ESPFIX64 +#define always_true() true +#else +bool always_true(void); +bool __weak always_true(void) { return true; } +#endif + /* * Runs on an IST stack for x86_64 and on a special task stack for x86_32. * @@ -514,7 +529,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code); die("double fault", regs, error_code); - panic("Machine halted."); + if (always_true()) + panic("Machine halted."); instrumentation_end(); }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 34dec0b72ea8..88e5a4ed9db3 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -959,7 +959,7 @@ static unsigned long long cyc2ns_suspend;
void tsc_save_sched_clock_state(void) { - if (!sched_clock_stable()) + if (!static_branch_likely(&__use_tsc) && !sched_clock_stable()) return;
cyc2ns_suspend = sched_clock(); @@ -979,7 +979,7 @@ void tsc_restore_sched_clock_state(void) unsigned long flags; int cpu;
- if (!sched_clock_stable()) + if (!static_branch_likely(&__use_tsc) && !sched_clock_stable()) return;
local_irq_save(flags); diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 5a952c5ea66b..9194695662b2 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -357,19 +357,23 @@ void *arch_uprobe_trampoline(unsigned long *psize) return &insn; }
-static unsigned long trampoline_check_ip(void) +static unsigned long trampoline_check_ip(unsigned long tramp) { - unsigned long tramp = uprobe_get_trampoline_vaddr(); - return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry); }
SYSCALL_DEFINE0(uretprobe) { struct pt_regs *regs = task_pt_regs(current); - unsigned long err, ip, sp, r11_cx_ax[3]; + unsigned long err, ip, sp, r11_cx_ax[3], tramp; + + /* If there's no trampoline, we are called from wrong place. */ + tramp = uprobe_get_trampoline_vaddr(); + if (unlikely(tramp == UPROBE_NO_TRAMPOLINE_VADDR)) + goto sigill;
- if (regs->ip != trampoline_check_ip()) + /* Make sure the ip matches the only allowed sys_uretprobe caller. */ + if (unlikely(regs->ip != trampoline_check_ip(tramp))) goto sigill;
err = copy_from_user(r11_cx_ax, (void __user *)regs->sp, sizeof(r11_cx_ax)); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 661108d65ee7..510901b8c369 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3969,16 +3969,12 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
/* * The target vCPU is valid, so the vCPU will be kicked unless the - * request is for CREATE_ON_INIT. For any errors at this stage, the - * kick will place the vCPU in an non-runnable state. + * request is for CREATE_ON_INIT. */ kick = true;
mutex_lock(&target_svm->sev_es.snp_vmsa_mutex);
- target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; - target_svm->sev_es.snp_ap_waiting_for_reset = true; - /* Interrupt injection mode shouldn't change for AP creation */ if (request < SVM_VMGEXIT_AP_DESTROY) { u64 sev_features; @@ -4024,20 +4020,23 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm) target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2; break; case SVM_VMGEXIT_AP_DESTROY: + target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; break; default: vcpu_unimpl(vcpu, "vmgexit: invalid AP creation request [%#x] from guest\n", request); ret = -EINVAL; - break; + goto out; }
-out: + target_svm->sev_es.snp_ap_waiting_for_reset = true; + if (kick) { kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu); kvm_vcpu_kick(target_vcpu); }
+out: mutex_unlock(&target_svm->sev_es.snp_vmsa_mutex);
return ret; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4b64ab350bcd..01d3fa84d2a4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4573,6 +4573,11 @@ static bool kvm_is_vm_type_supported(unsigned long type) return type < 32 && (kvm_caps.supported_vm_types & BIT(type)); }
+static inline u32 kvm_sync_valid_fields(struct kvm *kvm) +{ + return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS; +} + int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r = 0; @@ -4681,7 +4686,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; #endif case KVM_CAP_SYNC_REGS: - r = KVM_SYNC_X86_VALID_FIELDS; + r = kvm_sync_valid_fields(kvm); break; case KVM_CAP_ADJUST_CLOCK: r = KVM_CLOCK_VALID_FLAGS; @@ -11474,6 +11479,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { struct kvm_queued_exception *ex = &vcpu->arch.exception; struct kvm_run *kvm_run = vcpu->run; + u32 sync_valid_fields; int r;
r = kvm_mmu_post_init_vm(vcpu->kvm); @@ -11519,8 +11525,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) goto out; }
- if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) || - (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) { + sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm); + if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) || + (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) { r = -EINVAL; goto out; } @@ -11578,7 +11585,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
out: kvm_put_guest_fpu(vcpu); - if (kvm_run->kvm_valid_regs) + if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected)) store_regs(vcpu); post_kvm_run_save(vcpu); kvm_vcpu_srcu_read_unlock(vcpu); diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index fc9fb5d06174..b8f74d80f35c 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -74,6 +74,24 @@ SYM_FUNC_START(rep_movs_alternative) _ASM_EXTABLE_UA( 0b, 1b)
.Llarge_movsq: + /* Do the first possibly unaligned word */ +0: movq (%rsi),%rax +1: movq %rax,(%rdi) + + _ASM_EXTABLE_UA( 0b, .Lcopy_user_tail) + _ASM_EXTABLE_UA( 1b, .Lcopy_user_tail) + + /* What would be the offset to the aligned destination? */ + leaq 8(%rdi),%rax + andq $-8,%rax + subq %rdi,%rax + + /* .. and update pointers and count to match */ + addq %rax,%rdi + addq %rax,%rsi + subq %rax,%rcx + + /* make %rcx contain the number of words, %rax the remainder */ movq %rcx,%rax shrq $3,%rcx andl $7,%eax diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index e6c7686f443a..9fce5b87b8c5 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -565,7 +565,7 @@ void __head sme_enable(struct boot_params *bp) }
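
The copy_user_64.S hunk above handles a possibly misaligned destination by copying one full word blindly and then advancing both pointers so that %rdi becomes 8-byte aligned before the word loop; the leaq/andq/subq sequence computes how far to advance. The same computation in C over a few sample destinations, as a sketch:

#include <stdint.h>
#include <stdio.h>

/* leaq 8(%rdi),%rax; andq $-8,%rax; subq %rdi,%rax: advance by 1..8
 * bytes so the destination becomes 8-byte aligned. */
static unsigned long head_advance(uintptr_t dst)
{
	return ((dst + 8) & ~7ul) - dst;
}

int main(void)
{
	uintptr_t d;

	for (d = 0x1000; d < 0x1009; d++)
		printf("dst=%#lx advance=%lu -> %#lx\n",
		       (unsigned long)d, head_advance(d),
		       (unsigned long)(d + head_advance(d)));
	return 0;
}
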
RIP_REL_REF(sme_me_mask) = me_mask; - physical_mask &= ~me_mask; - cc_vendor = CC_VENDOR_AMD; + RIP_REL_REF(physical_mask) &= ~me_mask; + RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD; cc_set_mask(me_mask); } diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c index 3d2f7f0a6ed1..ad3c1feec990 100644 --- a/arch/x86/mm/pat/cpa-test.c +++ b/arch/x86/mm/pat/cpa-test.c @@ -183,7 +183,7 @@ static int pageattr_test(void) break;
case 1: - err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1); + err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1); break;
case 2: diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index feb8cc6a12bf..d721cc19addb 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -984,29 +984,42 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, return -EINVAL; }
-/* - * track_pfn_copy is called when vma that is covering the pfnmap gets - * copied through copy_page_range(). - * - * If the vma has a linear pfn mapping for the entire range, we get the prot - * from pte and reserve the entire vma range with single reserve_pfn_range call. - */ -int track_pfn_copy(struct vm_area_struct *vma) +int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn) { + const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start; resource_size_t paddr; - unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; + int rc;
- if (vma->vm_flags & VM_PAT) { - if (get_pat_info(vma, &paddr, &pgprot)) - return -EINVAL; - /* reserve the whole chunk covered by vma. */ - return reserve_pfn_range(paddr, vma_size, &pgprot, 1); - } + if (!(src_vma->vm_flags & VM_PAT)) + return 0; + + /* + * Duplicate the PAT information for the dst VMA based on the src + * VMA. + */ + if (get_pat_info(src_vma, &paddr, &pgprot)) + return -EINVAL; + rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1); + if (rc) + return rc;
+ /* Reservation for the destination VMA succeeded. */ + vm_flags_set(dst_vma, VM_PAT); + *pfn = PHYS_PFN(paddr); return 0; }
+void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn) +{ + untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true); + /* + * Reservation was freed, any copied page tables will get cleaned + * up later, but without getting PAT involved again. + */ +} + /* * prot is passed in as a parameter for the new mapping. If the vma has * a linear pfn mapping for the entire range, or no vma is provided, @@ -1095,15 +1108,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, } }
-/* - * untrack_pfn_clear is called if the following situation fits: - * - * 1) while mremapping a pfnmap for a new region, with the old vma after - * its pfnmap page table has been removed. The new vma has a new pfnmap - * to the same pfn & cache type with VM_PAT set. - * 2) while duplicating vm area, the new vma fails to copy the pgtable from - * old vma. - */ void untrack_pfn_clear(struct vm_area_struct *vma) { vm_flags_clear(vma, VM_PAT); diff --git a/block/badblocks.c b/block/badblocks.c index db4ec8b9b2a8..dc147c017961 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -527,51 +527,6 @@ static int prev_badblocks(struct badblocks *bb, struct badblocks_context *bad, return ret; }
-/* - * Return 'true' if the range indicated by 'bad' can be backward merged - * with the bad range (from the bad table) index by 'behind'. - */ -static bool can_merge_behind(struct badblocks *bb, - struct badblocks_context *bad, int behind) -{ - sector_t sectors = bad->len; - sector_t s = bad->start; - u64 *p = bb->page; - - if ((s < BB_OFFSET(p[behind])) && - ((s + sectors) >= BB_OFFSET(p[behind])) && - ((BB_END(p[behind]) - s) <= BB_MAX_LEN) && - BB_ACK(p[behind]) == bad->ack) - return true; - return false; -} - -/* - * Do backward merge for range indicated by 'bad' and the bad range - * (from the bad table) indexed by 'behind'. The return value is merged - * sectors from bad->len. - */ -static int behind_merge(struct badblocks *bb, struct badblocks_context *bad, - int behind) -{ - sector_t sectors = bad->len; - sector_t s = bad->start; - u64 *p = bb->page; - int merged = 0; - - WARN_ON(s >= BB_OFFSET(p[behind])); - WARN_ON((s + sectors) < BB_OFFSET(p[behind])); - - if (s < BB_OFFSET(p[behind])) { - merged = BB_OFFSET(p[behind]) - s; - p[behind] = BB_MAKE(s, BB_LEN(p[behind]) + merged, bad->ack); - - WARN_ON((BB_LEN(p[behind]) + merged) >= BB_MAX_LEN); - } - - return merged; -} - /* * Return 'true' if the range indicated by 'bad' can be forward * merged with the bad range (from the bad table) indexed by 'prev'. @@ -745,7 +700,7 @@ static bool can_front_overwrite(struct badblocks *bb, int prev, *extra = 2; }
- if ((bb->count + (*extra)) >= MAX_BADBLOCKS) + if ((bb->count + (*extra)) > MAX_BADBLOCKS) return false;
return true; @@ -855,40 +810,60 @@ static void badblocks_update_acked(struct badblocks *bb) bb->unacked_exist = 0; }
+/* + * Merge the bad range indexed by 'prev' with the next bad range in the + * table when they are adjacent, share the same ack state, and the combined + * length still fits in BB_MAX_LEN. Return 'true' if a merge was done. + */ +static bool try_adjacent_combine(struct badblocks *bb, int prev) +{ + u64 *p = bb->page; + + if (prev >= 0 && (prev + 1) < bb->count && + BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) && + (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN && + BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) { + p[prev] = BB_MAKE(BB_OFFSET(p[prev]), + BB_LEN(p[prev]) + BB_LEN(p[prev + 1]), + BB_ACK(p[prev])); + + if ((prev + 2) < bb->count) + memmove(p + prev + 1, p + prev + 2, + (bb->count - (prev + 2)) * 8); + bb->count--; + return true; + } + return false; +} + /* Do exact work to set bad block range into the bad block table */ -static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors, - int acknowledged) +static bool _badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors, + int acknowledged) { - int retried = 0, space_desired = 0; - int orig_len, len = 0, added = 0; + int len = 0, added = 0; struct badblocks_context bad; int prev = -1, hint = -1; - sector_t orig_start; unsigned long flags; - int rv = 0; u64 *p;
if (bb->shift < 0) /* badblocks are disabled */ - return 1; + return false;
if (sectors == 0) /* Invalid sectors number */ - return 1; + return false;
if (bb->shift) { /* round the start down, and the end up */ sector_t next = s + sectors;
- rounddown(s, bb->shift); - roundup(next, bb->shift); + rounddown(s, 1 << bb->shift); + roundup(next, 1 << bb->shift); sectors = next - s; }
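A quick note on the rounding fix above: bb->shift is an exponent, so the alignment granularity is 1 << bb->shift sectors, not bb->shift itself. A minimal user-space sketch of the intended alignment arithmetic, with illustrative helpers rather than the kernel's rounddown()/roundup() macros:

#include <stdio.h>

typedef unsigned long long sector_t;

/* illustrative helpers, not the kernel macros */
static sector_t align_down(sector_t x, sector_t gran) { return x - (x % gran); }
static sector_t align_up(sector_t x, sector_t gran)   { return align_down(x + gran - 1, gran); }

int main(void)
{
	int shift = 3;                        /* e.g. 8-sector (4 KiB) granularity */
	sector_t s = 10, sectors = 5;
	sector_t next = s + sectors;

	s = align_down(s, 1ULL << shift);     /* 10 -> 8  */
	next = align_up(next, 1ULL << shift); /* 15 -> 16 */
	sectors = next - s;                   /* 8 sectors now cover the original range */

	printf("aligned range: start=%llu len=%llu\n", s, sectors);
	return 0;
}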
write_seqlock_irqsave(&bb->lock, flags);
- orig_start = s; - orig_len = sectors; bad.ack = acknowledged; p = bb->page;
@@ -897,6 +872,9 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors, bad.len = sectors; len = 0;
+ if (badblocks_full(bb)) + goto out; + if (badblocks_empty(bb)) { len = insert_at(bb, 0, &bad); bb->count++; @@ -908,32 +886,14 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
/* start before all badblocks */ if (prev < 0) { - if (!badblocks_full(bb)) { - /* insert on the first */ - if (bad.len > (BB_OFFSET(p[0]) - bad.start)) - bad.len = BB_OFFSET(p[0]) - bad.start; - len = insert_at(bb, 0, &bad); - bb->count++; - added++; - hint = 0; - goto update_sectors; - } - - /* No sapce, try to merge */ - if (overlap_behind(bb, &bad, 0)) { - if (can_merge_behind(bb, &bad, 0)) { - len = behind_merge(bb, &bad, 0); - added++; - } else { - len = BB_OFFSET(p[0]) - s; - space_desired = 1; - } - hint = 0; - goto update_sectors; - } - - /* no table space and give up */ - goto out; + /* insert on the first */ + if (bad.len > (BB_OFFSET(p[0]) - bad.start)) + bad.len = BB_OFFSET(p[0]) - bad.start; + len = insert_at(bb, 0, &bad); + bb->count++; + added++; + hint = ++prev; + goto update_sectors; }
/* in case p[prev-1] can be merged with p[prev] */ @@ -953,6 +913,9 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors, int extra = 0;
if (!can_front_overwrite(bb, prev, &bad, &extra)) { + if (extra > 0) + goto out; + len = min_t(sector_t, BB_END(p[prev]) - s, sectors); hint = prev; @@ -979,24 +942,6 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors, goto update_sectors; }
- /* if no space in table, still try to merge in the covered range */ - if (badblocks_full(bb)) { - /* skip the cannot-merge range */ - if (((prev + 1) < bb->count) && - overlap_behind(bb, &bad, prev + 1) && - ((s + sectors) >= BB_END(p[prev + 1]))) { - len = BB_END(p[prev + 1]) - s; - hint = prev + 1; - goto update_sectors; - } - - /* no retry any more */ - len = sectors; - space_desired = 1; - hint = -1; - goto update_sectors; - } - /* cannot merge and there is space in bad table */ if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) @@ -1006,7 +951,7 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors, len = insert_at(bb, prev + 1, &bad); bb->count++; added++; - hint = prev + 1; + hint = ++prev;
update_sectors: s += len; @@ -1015,35 +960,12 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors, if (sectors > 0) goto re_insert;
- WARN_ON(sectors < 0); - /* * Check whether the following already set range can be * merged. (prev < 0) condition is not handled here, * because it's already complicated enough. */ - if (prev >= 0 && - (prev + 1) < bb->count && - BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) && - (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN && - BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) { - p[prev] = BB_MAKE(BB_OFFSET(p[prev]), - BB_LEN(p[prev]) + BB_LEN(p[prev + 1]), - BB_ACK(p[prev])); - - if ((prev + 2) < bb->count) - memmove(p + prev + 1, p + prev + 2, - (bb->count - (prev + 2)) * 8); - bb->count--; - } - - if (space_desired && !badblocks_full(bb)) { - s = orig_start; - sectors = orig_len; - space_desired = 0; - if (retried++ < 3) - goto re_insert; - } + try_adjacent_combine(bb, prev);
out: if (added) { @@ -1057,10 +979,7 @@ static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
write_sequnlock_irqrestore(&bb->lock, flags);
- if (!added) - rv = 1; - - return rv; + return sectors == 0; }
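The try_adjacent_combine() helper added earlier in this file merges two neighbouring table entries when they touch, carry the same ack state, and the combined length still fits the per-entry limit. A small stand-alone simulation of that merge step, using a plain struct array instead of the packed u64 table; the names and the 512-sector cap below are assumptions for the example only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct range { unsigned long long start, len; bool ack; };

#define MAX_LEN 512   /* stand-in for BB_MAX_LEN */

/* merge entry i with entry i+1 when they are adjacent and compatible */
static bool combine_adjacent(struct range *tbl, int *count, int i)
{
	if (i < 0 || i + 1 >= *count)
		return false;
	if (tbl[i].start + tbl[i].len != tbl[i + 1].start)
		return false;
	if (tbl[i].len + tbl[i + 1].len > MAX_LEN)
		return false;
	if (tbl[i].ack != tbl[i + 1].ack)
		return false;

	tbl[i].len += tbl[i + 1].len;
	memmove(&tbl[i + 1], &tbl[i + 2], (*count - (i + 2)) * sizeof(*tbl));
	(*count)--;
	return true;
}

int main(void)
{
	struct range tbl[] = { { 0, 8, true }, { 8, 8, true }, { 32, 4, true } };
	int count = 3;

	combine_adjacent(tbl, &count, 0);   /* leaves [0,+16) ack and [32,+4) ack */
	for (int i = 0; i < count; i++)
		printf("[%llu, +%llu) ack=%d\n", tbl[i].start, tbl[i].len, tbl[i].ack);
	return 0;
}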
/* @@ -1131,21 +1050,20 @@ static int front_splitting_clear(struct badblocks *bb, int prev, }
/* Do the exact work to clear bad block range from the bad block table */ -static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors) +static bool _badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors) { struct badblocks_context bad; int prev = -1, hint = -1; int len = 0, cleared = 0; - int rv = 0; u64 *p;
if (bb->shift < 0) /* badblocks are disabled */ - return 1; + return false;
if (sectors == 0) /* Invalid sectors number */ - return 1; + return false;
if (bb->shift) { sector_t target; @@ -1157,8 +1075,8 @@ static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors) * isn't than to think a block is not bad when it is. */ target = s + sectors; - roundup(s, bb->shift); - rounddown(target, bb->shift); + roundup(s, 1 << bb->shift); + rounddown(target, 1 << bb->shift); sectors = target - s; }
@@ -1214,7 +1132,7 @@ static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors) if ((BB_OFFSET(p[prev]) < bad.start) && (BB_END(p[prev]) > (bad.start + bad.len))) { /* Splitting */ - if ((bb->count + 1) < MAX_BADBLOCKS) { + if ((bb->count + 1) <= MAX_BADBLOCKS) { len = front_splitting_clear(bb, prev, &bad); bb->count += 1; cleared++; @@ -1255,8 +1173,6 @@ static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors) if (sectors > 0) goto re_clear;
- WARN_ON(sectors < 0); - if (cleared) { badblocks_update_acked(bb); set_changed(bb); @@ -1265,40 +1181,21 @@ static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors) write_sequnlock_irq(&bb->lock);
if (!cleared) - rv = 1; + return false;
- return rv; + return true; }
/* Do the exact work to check bad blocks range from the bad block table */ -static int _badblocks_check(struct badblocks *bb, sector_t s, int sectors, - sector_t *first_bad, int *bad_sectors) +static int _badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors, + sector_t *first_bad, sector_t *bad_sectors) { - int unacked_badblocks, acked_badblocks; int prev = -1, hint = -1, set = 0; struct badblocks_context bad; - unsigned int seq; + int unacked_badblocks = 0; + int acked_badblocks = 0; + u64 *p = bb->page; int len, rv; - u64 *p; - - WARN_ON(bb->shift < 0 || sectors == 0); - - if (bb->shift > 0) { - sector_t target; - - /* round the start down, and the end up */ - target = s + sectors; - rounddown(s, bb->shift); - roundup(target, bb->shift); - sectors = target - s; - } - -retry: - seq = read_seqbegin(&bb->lock); - - p = bb->page; - unacked_badblocks = 0; - acked_badblocks = 0;
re_check: bad.start = s; @@ -1364,9 +1261,6 @@ static int _badblocks_check(struct badblocks *bb, sector_t s, int sectors, else rv = 0;
- if (read_seqretry(&bb->lock, seq)) - goto retry; - return rv; }
@@ -1404,10 +1298,30 @@ static int _badblocks_check(struct badblocks *bb, sector_t s, int sectors, * -1: there are bad blocks which have not yet been acknowledged in metadata. * plus the start/length of the first bad section we overlap. */ -int badblocks_check(struct badblocks *bb, sector_t s, int sectors, - sector_t *first_bad, int *bad_sectors) +int badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors, + sector_t *first_bad, sector_t *bad_sectors) { - return _badblocks_check(bb, s, sectors, first_bad, bad_sectors); + unsigned int seq; + int rv; + + WARN_ON(bb->shift < 0 || sectors == 0); + + if (bb->shift > 0) { + /* round the start down, and the end up */ + sector_t target = s + sectors; + + rounddown(s, 1 << bb->shift); + roundup(target, 1 << bb->shift); + sectors = target - s; + } + +retry: + seq = read_seqbegin(&bb->lock); + rv = _badblocks_check(bb, s, sectors, first_bad, bad_sectors); + if (read_seqretry(&bb->lock, seq)) + goto retry; + + return rv; } EXPORT_SYMBOL_GPL(badblocks_check);
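The hunk above moves the seqlock retry loop out of _badblocks_check() and into the exported badblocks_check() wrapper. For readers unfamiliar with the read-side pattern, here is a minimal user-space sketch using C11 atomics; seq_begin()/seq_retry() and the forced single retry are illustrative stand-ins, not the kernel's seqlock API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint seq;          /* even: stable, odd: write in progress */
static int table_value = 42;     /* stand-in for the badblocks table */

static unsigned int seq_begin(void)
{
	unsigned int s;

	/* wait until no write is in progress */
	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
	} while (s & 1);
	return s;
}

static bool seq_retry(unsigned int start)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&seq, memory_order_relaxed) != start;
}

int main(void)
{
	unsigned int s;
	int snapshot;
	int tries = 0;

	do {
		s = seq_begin();
		snapshot = table_value;            /* the "check" step */
		if (tries++ == 0) {
			/* simulate a concurrent writer to force one retry */
			atomic_fetch_add(&seq, 2);
			table_value = 43;
		}
	} while (seq_retry(s));

	printf("stable snapshot: %d after %d tries\n", snapshot, tries);
	return 0;
}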
@@ -1423,11 +1337,12 @@ EXPORT_SYMBOL_GPL(badblocks_check); * decide how best to handle it. * * Return: - * 0: success - * 1: failed to set badblocks (out of space) + * true: success + * false: failed to set badblocks (out of space). Partial setting will be + * treated as failure. */ -int badblocks_set(struct badblocks *bb, sector_t s, int sectors, - int acknowledged) +bool badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors, + int acknowledged) { return _badblocks_set(bb, s, sectors, acknowledged); } @@ -1444,10 +1359,10 @@ EXPORT_SYMBOL_GPL(badblocks_set); * drop the remove request. * * Return: - * 0: success - * 1: failed to clear badblocks + * true: success + * false: failed to clear badblocks */ -int badblocks_clear(struct badblocks *bb, sector_t s, int sectors) +bool badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors) { return _badblocks_clear(bb, s, sectors); } @@ -1479,6 +1394,11 @@ void ack_all_badblocks(struct badblocks *bb) p[i] = BB_MAKE(start, len, 1); } } + + for (i = 0; i < bb->count ; i++) + while (try_adjacent_combine(bb, i)) + ; + bb->unacked_exist = 0; } write_sequnlock_irq(&bb->lock); @@ -1564,10 +1484,10 @@ ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len, return -EINVAL; }
- if (badblocks_set(bb, sector, length, !unack)) + if (!badblocks_set(bb, sector, length, !unack)) return -ENOSPC; - else - return len; + + return len; } EXPORT_SYMBOL_GPL(badblocks_store);
diff --git a/block/bio.c b/block/bio.c index 6ac5983ba51e..6deea10b2cd3 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1026,9 +1026,10 @@ EXPORT_SYMBOL(bio_add_page); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, size_t off) { + unsigned long nr = off / PAGE_SIZE; + WARN_ON_ONCE(len > UINT_MAX); - WARN_ON_ONCE(off > UINT_MAX); - __bio_add_page(bio, &folio->page, len, off); + __bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE); } EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
@@ -1049,9 +1050,11 @@ EXPORT_SYMBOL_GPL(bio_add_folio_nofail); bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, size_t off) { - if (len > UINT_MAX || off > UINT_MAX) + unsigned long nr = off / PAGE_SIZE; + + if (len > UINT_MAX) return false; - return bio_add_page(bio, &folio->page, len, off) > 0; + return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0; } EXPORT_SYMBOL(bio_add_folio);
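Both folio helpers above now split the byte offset into a page index plus an in-page offset, so offsets past the first page of a large folio land on the right sub-page. The decomposition is plain modular arithmetic; a tiny stand-alone illustration (the 4 KiB PAGE_SIZE and the sample offset are assumptions):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long off = 10000;                /* byte offset inside a large folio */
	unsigned long page_idx = off / PAGE_SIZE; /* which page of the folio: 2 */
	unsigned long page_off = off % PAGE_SIZE; /* offset within that page: 1808 */

	printf("folio offset %lu -> page %lu, offset %lu\n", off, page_idx, page_off);
	return 0;
}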
diff --git a/block/blk-settings.c b/block/blk-settings.c index b9c6f0ec1c49..66721afeea54 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -114,6 +114,7 @@ static int blk_validate_integrity_limits(struct queue_limits *lim) pr_warn("invalid PI settings.\n"); return -EINVAL; } + bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY; return 0; }
@@ -867,36 +868,28 @@ bool queue_limits_stack_integrity(struct queue_limits *t, if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) return true;
- if (!ti->tuple_size) { - /* inherit the settings from the first underlying device */ - if (!(ti->flags & BLK_INTEGRITY_STACKED)) { - ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE | - (bi->flags & BLK_INTEGRITY_REF_TAG); - ti->csum_type = bi->csum_type; - ti->tuple_size = bi->tuple_size; - ti->pi_offset = bi->pi_offset; - ti->interval_exp = bi->interval_exp; - ti->tag_size = bi->tag_size; - goto done; - } - if (!bi->tuple_size) - goto done; + if (ti->flags & BLK_INTEGRITY_STACKED) { + if (ti->tuple_size != bi->tuple_size) + goto incompatible; + if (ti->interval_exp != bi->interval_exp) + goto incompatible; + if (ti->tag_size != bi->tag_size) + goto incompatible; + if (ti->csum_type != bi->csum_type) + goto incompatible; + if ((ti->flags & BLK_INTEGRITY_REF_TAG) != + (bi->flags & BLK_INTEGRITY_REF_TAG)) + goto incompatible; + } else { + ti->flags = BLK_INTEGRITY_STACKED; + ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) | + (bi->flags & BLK_INTEGRITY_REF_TAG); + ti->csum_type = bi->csum_type; + ti->tuple_size = bi->tuple_size; + ti->pi_offset = bi->pi_offset; + ti->interval_exp = bi->interval_exp; + ti->tag_size = bi->tag_size; } - - if (ti->tuple_size != bi->tuple_size) - goto incompatible; - if (ti->interval_exp != bi->interval_exp) - goto incompatible; - if (ti->tag_size != bi->tag_size) - goto incompatible; - if (ti->csum_type != bi->csum_type) - goto incompatible; - if ((ti->flags & BLK_INTEGRITY_REF_TAG) != - (bi->flags & BLK_INTEGRITY_REF_TAG)) - goto incompatible; - -done: - ti->flags |= BLK_INTEGRITY_STACKED; return true;
incompatible: diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8d149aff9fd0..a52f0d6b40ad 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -599,14 +599,23 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) * sooner, then we need to reduce slice_end. A high bogus slice_end * is bad because it does not allow new slice to start. */ - throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
time_elapsed = rounddown(jiffies - tg->slice_start[rw], tg->td->throtl_slice); - if (!time_elapsed) + /* Don't trim slice until at least 2 slices are used */ + if (time_elapsed < tg->td->throtl_slice * 2) return;
+ /* + * The bio submission time may be a few jiffies more than the expected + * waiting time, because 'extra_bytes' can't be divided evenly in + * tg_within_bps_limit(), and also because of timer wakeup delay. In this + * case, adjusting slice_start would discard the extra wait time, causing + * a lower rate than expected. Therefore, in addition to the above rounddown, + * one extra slice is preserved for this deviation. + */ + time_elapsed -= tg->td->throtl_slice; bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw), time_elapsed) + tg->carryover_bytes[rw]; diff --git a/crypto/algapi.c b/crypto/algapi.c index 5318c214debb..6120329eadad 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -464,8 +464,7 @@ void crypto_unregister_alg(struct crypto_alg *alg) if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1)) return;
- if (alg->cra_destroy) - alg->cra_destroy(alg); + crypto_alg_put(alg);
crypto_remove_final(&list); } diff --git a/crypto/api.c b/crypto/api.c index bfd177a4313a..c2c4eb14ef95 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -36,7 +36,8 @@ EXPORT_SYMBOL_GPL(crypto_chain); DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); #endif
-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask); static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
@@ -145,7 +146,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, if (alg != &larval->alg) { kfree(larval); if (crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); + alg = crypto_larval_wait(alg, type, mask); }
return alg; @@ -197,7 +198,8 @@ static void crypto_start_test(struct crypto_larval *larval) crypto_schedule_test(larval); }
-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask) { struct crypto_larval *larval; long time_left; @@ -219,12 +221,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); } else if (!alg) { - u32 type; - u32 mask; - alg = &larval->alg; - type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); - mask = larval->mask; alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: ERR_PTR(-EAGAIN); } else if (IS_ERR(alg)) @@ -304,7 +301,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, }
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); + alg = crypto_larval_wait(alg, type, mask); else if (alg) ; else if (!(mask & CRYPTO_ALG_TESTED)) @@ -352,7 +349,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_STOP) - alg = crypto_larval_wait(larval); + alg = crypto_larval_wait(larval, type, mask); else { crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c index b5e657415770..a88798d3e8c8 100644 --- a/crypto/bpf_crypto_skcipher.c +++ b/crypto/bpf_crypto_skcipher.c @@ -80,3 +80,4 @@ static void __exit bpf_crypto_skcipher_exit(void) module_init(bpf_crypto_skcipher_init); module_exit(bpf_crypto_skcipher_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Symmetric key cipher support for BPF"); diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c index 73388443c676..d303701b0ded 100644 --- a/drivers/accel/amdxdna/aie2_smu.c +++ b/drivers/accel/amdxdna/aie2_smu.c @@ -64,6 +64,7 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) if (ret) { XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n", ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret); + return ret; } ndev->npuclk_freq = freq;
@@ -72,6 +73,7 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) if (ret) { XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n", ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret); + return ret; } ndev->hclk_freq = freq; ndev->dpm_level = dpm_level; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index a972831dbd66..064c7afba740 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -648,6 +648,13 @@ acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length
obj = buffer.pointer;
+ /* + * Some buggy implementations incorrectly return the EDID buffer in an ACPI package. + * In this case, extract the buffer from the package. + */ + if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 1) + obj = &obj->package.elements[0]; + if (obj && obj->type == ACPI_TYPE_BUFFER) { *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL); ret = *edid ? obj->buffer.length : -ENOMEM; @@ -657,7 +664,7 @@ acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length ret = -EFAULT; }
- kfree(obj); + kfree(buffer.pointer); return ret; }
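The kfree() change above matters because, once the EDID buffer has been unwrapped from a package, obj may point into the middle of the allocation returned by ACPICA while buffer.pointer still names its start. A small user-space sketch of the same interior-pointer pitfall; the structures are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct element { int type; };
struct package { int type; struct element elements[1]; };

int main(void)
{
	/* base of the allocation, analogous to buffer.pointer */
	struct package *base = malloc(sizeof(*base));
	struct element *obj;

	if (!base)
		return 1;
	base->type = 1;
	base->elements[0].type = 2;

	/* after "unwrapping", obj aliases an interior member, not the base */
	obj = &base->elements[0];
	printf("inner type: %d\n", obj->type);

	/* free(obj) here would be undefined behaviour; free the base instead */
	free(base);
	return 0;
}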
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index a5d47819b3a4..ae035b93da08 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_mask = nd_desc->cmd_mask; if (cmd == ND_CMD_CALL && call_pkg->nd_family) { family = call_pkg->nd_family; - if (family > NVDIMM_BUS_FAMILY_MAX || + if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX || !test_bit(family, &nd_desc->bus_family_mask)) return -EINVAL; family = array_index_nospec(family, diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index ef9444482db1..174a6439a412 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -289,14 +289,14 @@ static int _remove_hidden_choices(struct device *dev, void *arg)
/** * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface - * @dev: The device + * @kobj: The kobject * @attr: The attribute * @buf: The buffer to write to * * Return: The number of bytes written */ -static ssize_t platform_profile_choices_show(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_choices_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { struct aggregate_choices_data data = { @@ -371,14 +371,14 @@ static int _store_and_notify(struct device *dev, void *data)
/** * platform_profile_show - Show the current profile for legacy sysfs interface - * @dev: The device + * @kobj: The kobject * @attr: The attribute * @buf: The buffer to write to * * Return: The number of bytes written */ -static ssize_t platform_profile_show(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { enum platform_profile_option profile = PLATFORM_PROFILE_LAST; @@ -400,15 +400,15 @@ static ssize_t platform_profile_show(struct device *dev,
/** * platform_profile_store - Set the profile for legacy sysfs interface - * @dev: The device + * @kobj: The kobject * @attr: The attribute * @buf: The buffer to read from * @count: The number of bytes to read * * Return: The number of bytes read */ -static ssize_t platform_profile_store(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) { struct aggregate_choices_data data = { @@ -442,12 +442,12 @@ static ssize_t platform_profile_store(struct device *dev, return count; }
-static DEVICE_ATTR_RO(platform_profile_choices); -static DEVICE_ATTR_RW(platform_profile); +static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices); +static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = { - &dev_attr_platform_profile_choices.attr, - &dev_attr_platform_profile.attr, + &attr_platform_profile_choices.attr, + &attr_platform_profile.attr, NULL };
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 698897b29de2..2df1296ff44d 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", pr->power.states[ACPI_STATE_C3].address);
+ if (!pr->power.states[ACPI_STATE_C2].address && + !pr->power.states[ACPI_STATE_C3].address) + return -ENODEV; + return 0; }
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index b4cd14e7fa76..14c7bac4100b 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), }, }, + { + /* Asus Vivobook X1404VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"), + }, + }, { /* Asus Vivobook X1504VAP */ .matches = { diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 068c1612660b..4ee30c2897a2 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -374,7 +374,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | - ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { /* Medion Lifetab S10346 */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index d956735e2a76..3d730c10f7be 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2243,7 +2243,7 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) { ata_dev_warn(dev, - "NCQ Send/Recv Log not supported\n"); + "NCQ Non-Data Log not supported\n"); return; } err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA, diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index 8934e6ad5772..bedc6133f970 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -503,6 +503,7 @@ config HT16K33 config MAX6959 tristate "Maxim MAX6958/6959 7-segment LED controller" depends on I2C + select BITREVERSE select REGMAP_I2C select LINEDISP help diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index a731f28455b4..6dc8798d01f9 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1664,7 +1664,7 @@ static void panel_attach(struct parport *port) if (lcd.enabled) charlcd_unregister(lcd.charlcd); err_unreg_device: - kfree(lcd.charlcd); + charlcd_free(lcd.charlcd); lcd.charlcd = NULL; parport_unregister_device(pprt); pprt = NULL; @@ -1692,7 +1692,7 @@ static void panel_detach(struct parport *port) charlcd_unregister(lcd.charlcd); lcd.initialized = false; kfree(lcd.charlcd->drvdata); - kfree(lcd.charlcd); + charlcd_free(lcd.charlcd); lcd.charlcd = NULL; }
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 40e1d8d8a589..23be2d1b0407 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -929,6 +929,9 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete;
+ if (!dev->power.is_suspended) + goto Complete; + if (dev->power.direct_complete) { /* Match the pm_runtime_disable() in device_suspend(). */ pm_runtime_enable(dev); @@ -947,9 +950,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) */ dev->power.is_prepared = false;
- if (!dev->power.is_suspended) - goto Unlock; - if (dev->pm_domain) { info = "power domain "; callback = pm_op(&dev->pm_domain->ops, state); @@ -989,7 +989,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) error = dpm_run_callback(callback, dev, state, info); dev->power.is_suspended = false;
- Unlock: device_unlock(dev); dpm_watchdog_clear(&wd);
@@ -1270,14 +1269,13 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy dev->power.is_noirq_suspended = true;
/* - * Skipping the resume of devices that were in use right before the - * system suspend (as indicated by their PM-runtime usage counters) - * would be suboptimal. Also resume them if doing that is not allowed - * to be skipped. + * Devices must be resumed unless they are explicitly allowed to be left + * in suspend, but even in that case skipping the resume of devices that + * were in use right before the system suspend (as indicated by their + * runtime PM usage counters and child counters) would be suboptimal. */ - if (atomic_read(&dev->power.usage_count) > 1 || - !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && - dev->power.may_skip_resume)) + if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && + dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev)) dev->power.must_resume = true;
if (dev->power.must_resume) { @@ -1650,6 +1648,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) pm_runtime_disable(dev); if (pm_runtime_status_suspended(dev)) { pm_dev_dbg(dev, state, "direct-complete "); + dev->power.is_suspended = true; goto Complete; }
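The rewritten condition above is easier to follow with the negations pulled apart: the device must be resumed unless both the driver (DPM_FLAG_MAY_SKIP_RESUME) and the core (power.may_skip_resume) allow skipping and pm_runtime_need_not_resume() agrees. A tiny truth-table sketch of that decision, as a free-standing helper rather than the kernel code:

#include <stdbool.h>
#include <stdio.h>

/* mirrors the shape of the condition, with descriptive parameter names */
static bool must_resume(bool driver_may_skip, bool core_may_skip, bool need_not_resume)
{
	return !(driver_may_skip && core_may_skip) || !need_not_resume;
}

int main(void)
{
	printf("skip allowed + not needed  -> must_resume=%d\n",
	       must_resume(true, true, true));    /* 0: resume may be skipped */
	printf("skip allowed + still needed -> must_resume=%d\n",
	       must_resume(true, true, false));   /* 1 */
	printf("skip not allowed            -> must_resume=%d\n",
	       must_resume(true, false, true));   /* 1 */
	return 0;
}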
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 2ee45841486b..04113adb092b 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1874,7 +1874,7 @@ void pm_runtime_drop_link(struct device_link *link) pm_request_idle(link->supplier); }
-static bool pm_runtime_need_not_resume(struct device *dev) +bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && (atomic_read(&dev->power.child_count) == 0 || diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index fdc7a0b2af10..175566a71bb3 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -559,14 +559,14 @@ static ssize_t nullb_device_badblocks_store(struct config_item *item, goto out; /* enable badblocks */ cmpxchg(&t_dev->badblocks.shift, -1, 0); - if (buf[0] == '+') - ret = badblocks_set(&t_dev->badblocks, start, - end - start + 1, 1); - else - ret = badblocks_clear(&t_dev->badblocks, start, - end - start + 1); - if (ret == 0) + if (buf[0] == '+') { + if (badblocks_set(&t_dev->badblocks, start, + end - start + 1, 1)) + ret = count; + } else if (badblocks_clear(&t_dev->badblocks, start, + end - start + 1)) { ret = count; + } out: kfree(orig); return ret; @@ -1300,8 +1300,7 @@ static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t nr_sectors) { struct badblocks *bb = &cmd->nq->dev->badblocks; - sector_t first_bad; - int bad_sectors; + sector_t first_bad, bad_sectors;
if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors)) return BLK_STS_IOERR; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index ca9a67b5b537..b7adfaddc3ab 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -1452,17 +1452,27 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) } }
+/* Must be called when queue is frozen */ +static bool ublk_mark_queue_canceling(struct ublk_queue *ubq) +{ + bool canceled; + + spin_lock(&ubq->cancel_lock); + canceled = ubq->canceling; + if (!canceled) + ubq->canceling = true; + spin_unlock(&ubq->cancel_lock); + + return canceled; +} + static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq) { + bool was_canceled = ubq->canceling; struct gendisk *disk;
- spin_lock(&ubq->cancel_lock); - if (ubq->canceling) { - spin_unlock(&ubq->cancel_lock); + if (was_canceled) return false; - } - ubq->canceling = true; - spin_unlock(&ubq->cancel_lock);
spin_lock(&ub->lock); disk = ub->ub_disk; @@ -1474,14 +1484,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq) if (!disk) return false;
- /* Now we are serialized with ublk_queue_rq() */ + /* + * Now we are serialized with ublk_queue_rq() + * + * Make sure that ubq->canceling is set when queue is frozen, + * because ublk_queue_rq() has to rely on this flag to avoid + * touching a completed uring_cmd + */ blk_mq_quiesce_queue(disk->queue); - /* abort queue is for making forward progress */ - ublk_abort_queue(ub, ubq); + was_canceled = ublk_mark_queue_canceling(ubq); + if (!was_canceled) { + /* abort queue is for making forward progress */ + ublk_abort_queue(ub, ubq); + } blk_mq_unquiesce_queue(disk->queue); put_device(disk_to_dev(disk));
- return true; + return !was_canceled; }
static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io, diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index aa5ec1d444a9..6d66668d670a 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -651,8 +651,10 @@ static int nxp_download_firmware(struct hci_dev *hdev) &nxpdev->tx_state), msecs_to_jiffies(60000));
- release_firmware(nxpdev->fw); - memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + if (nxpdev->fw && strlen(nxpdev->fw_name)) { + release_firmware(nxpdev->fw); + memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + }
if (err == 0) { bt_dev_err(hdev, "FW Download Timeout. offset: %d", diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a0fc465458b2..699ff21d9767 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2477,6 +2477,8 @@ static int btusb_setup_csr(struct hci_dev *hdev) set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks); set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks); set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks);
/* Clear the reset quirk since this is not an actual * early Bluetooth 1.1 device from CSR. diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c index 85d781a32df4..7f5fd4e0940d 100644 --- a/drivers/bus/qcom-ssc-block-bus.c +++ b/drivers/bus/qcom-ssc-block-bus.c @@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- data->pd_names = qcom_ssc_block_pd_names; - data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names); - - /* power domains */ - ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds); - if (ret < 0) - return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n"); - - ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds); - if (ret < 0) - return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n"); - /* low level overrides for when the HW logic doesn't "just work" */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0"); data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res); @@ -343,11 +331,30 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
data->ssc_axi_halt = halt_args.args[0];
+ /* power domains */ + data->pd_names = qcom_ssc_block_pd_names; + data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names); + + ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n"); + + ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds); + if (ret < 0) { + dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n"); + goto err_detach_pds_bus; + } + qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0; + +err_detach_pds_bus: + qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds); + + return ret; }
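The reordered probe above follows the usual acquire-in-order, unwind-in-reverse pattern: a failure in the enable step now detaches the power domains that were attached just before it. A compact user-space sketch of the goto-based unwind; the resource names and return values are invented for the example:

#include <stdio.h>

static int attach_power_domains(void)  { puts("attach PDs"); return 0; }
static void detach_power_domains(void) { puts("detach PDs"); }
static int enable_power_domains(void)  { puts("enable PDs"); return -1; /* simulate failure */ }

static int probe(void)
{
	int ret;

	ret = attach_power_domains();
	if (ret)
		return ret;

	ret = enable_power_domains();
	if (ret)
		goto err_detach;          /* undo only the attach step */

	puts("probe ok");
	return 0;

err_detach:
	detach_power_domains();
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}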
static void qcom_ssc_block_bus_remove(struct platform_device *pdev) @@ -356,9 +363,6 @@ static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_deinit(&pdev->dev);
- iounmap(data->reg_mpm_sscaon_config0); - iounmap(data->reg_mpm_sscaon_config1); - qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds); qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds); pm_runtime_disable(&pdev->dev); diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index f476883bc93b..85e23961ec34 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -888,7 +888,6 @@ static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np, struct stm32f4_pll_ssc *conf) { int ret; - const char *s;
if (!conf) return -EINVAL; @@ -916,7 +915,8 @@ static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np, conf->mod_type = ret;
pr_debug("%pOF: SSCG settings: mod_freq: %d, mod_depth: %d mod_method: %s [%d]\n", - np, conf->mod_freq, conf->mod_depth, s, conf->mod_type); + np, conf->mod_freq, conf->mod_depth, + stm32f4_ssc_mod_methods[ret], conf->mod_type);
return 0; } diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c index c409fc7e0618..775f62dddb11 100644 --- a/drivers/clk/imx/clk-imx8mp-audiomix.c +++ b/drivers/clk/imx/clk-imx8mp-audiomix.c @@ -180,14 +180,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = { CLK_GATE("asrc", ASRC_IPG), CLK_GATE("pdm", PDM_IPG), CLK_GATE("earc", EARC_IPG), - CLK_GATE("ocrama", OCRAMA_IPG), + CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"), CLK_GATE("aud2htx", AUD2HTX_IPG), CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"), CLK_GATE("sdma2", SDMA2_ROOT), CLK_GATE("sdma3", SDMA3_ROOT), CLK_GATE("spba2", SPBA2_ROOT), - CLK_GATE("dsp", DSP_ROOT), - CLK_GATE("dspdbg", DSPDBG_ROOT), + CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"), + CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"), CLK_GATE("edma", EDMA_ROOT), CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"), CLK_GATE("mu2", MU2_ROOT), diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index cfffd434e998..ceabebb1863d 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -1137,8 +1137,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = { .hw.init = &(struct clk_init_data) { .name = "cpu_clk_div16_en", .ops = &clk_regmap_gate_ro_ops, - .parent_hws = (const struct clk_hw *[]) { - &g12a_cpu_clk.hw + .parent_data = &(const struct clk_parent_data) { + /* + * Note: + * G12A and G12B have different cpu clocks (with + * different struct clk_hw). We fallback to the global + * naming string mechanism so this clock picks + * up the appropriate one. Same goes for the other + * clock using cpu cluster A clock output and present + * on both G12 variant. + */ + .name = "cpu_clk", + .index = -1, }, .num_parents = 1, /* @@ -1203,7 +1213,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_apb_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1237,7 +1250,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_atb_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1271,7 +1287,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_axi_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1306,13 +1325,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = { .name = "cpu_clk_trace_div", .ops = &clk_regmap_divider_ro_ops, .parent_data = &(const struct clk_parent_data) { - /* - * Note: - * G12A and G12B have different cpu_clks (with - * different struct clk_hw). We fallback to the global - * naming string mechanism so cpu_clk_trace_div picks - * up the appropriate one. 
- */ .name = "cpu_clk", .index = -1, }, @@ -4311,7 +4323,7 @@ static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14); static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19); static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20); static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23); -static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4); +static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24); static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25); static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26); static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28); diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 8575b8485385..3abb44a2532b 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -1266,14 +1266,13 @@ static struct clk_regmap gxbb_cts_i958 = { }, };
+/* + * This table skips a clock named 'cts_slow_oscin' in the documentation + * This clock does not exist yet in this controller or the AO one + */ +static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 }; static const struct clk_parent_data gxbb_32k_clk_parent_data[] = { { .fw_name = "xtal", }, - /* - * FIXME: This clock is provided by the ao clock controller but the - * clock is not yet part of the binding of this controller, so string - * name must be use to set this parent. - */ - { .name = "cts_slow_oscin", .index = -1 }, { .hw = &gxbb_fclk_div3.hw }, { .hw = &gxbb_fclk_div5.hw }, }; @@ -1283,6 +1282,7 @@ static struct clk_regmap gxbb_32k_clk_sel = { .offset = HHI_32K_CLK_CNTL, .mask = 0x3, .shift = 16, + .table = gxbb_32k_clk_parents_val_table, }, .hw.init = &(struct clk_init_data){ .name = "32k_clk_sel", @@ -1306,7 +1306,7 @@ static struct clk_regmap gxbb_32k_clk_div = { &gxbb_32k_clk_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST, + .flags = CLK_SET_RATE_PARENT, }, };
diff --git a/drivers/clk/mmp/clk-pxa1908-apmu.c b/drivers/clk/mmp/clk-pxa1908-apmu.c index 8cfb1258202f..d3a070687fc5 100644 --- a/drivers/clk/mmp/clk-pxa1908-apmu.c +++ b/drivers/clk/mmp/clk-pxa1908-apmu.c @@ -87,8 +87,8 @@ static int pxa1908_apmu_probe(struct platform_device *pdev) struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL); - if (IS_ERR(pxa_unit)) - return PTR_ERR(pxa_unit); + if (!pxa_unit) + return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pxa_unit->base)) diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c index d5b218b76e29..3d42f3d85c7a 100644 --- a/drivers/clk/qcom/gcc-ipq5424.c +++ b/drivers/clk/qcom/gcc-ipq5424.c @@ -592,13 +592,19 @@ static struct clk_rcg2 gcc_qupv3_spi1_clk_src = { };
static const struct freq_tbl ftbl_gcc_qupv3_uart0_clk_src[] = { - F(960000, P_XO, 10, 2, 5), - F(4800000, P_XO, 5, 0, 0), - F(9600000, P_XO, 2, 4, 5), - F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5), + F(3686400, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 144, 15625), + F(7372800, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 288, 15625), + F(14745600, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 576, 15625), F(24000000, P_XO, 1, 0, 0), F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), - F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25), + F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20), + F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500), + F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50), + F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125), + F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100), + F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625), + F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40), F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0), { } }; @@ -634,11 +640,11 @@ static struct clk_rcg2 gcc_qupv3_uart1_clk_src = { static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = { F(144000, P_XO, 16, 12, 125), F(400000, P_XO, 12, 1, 5), - F(24000000, P_XO, 1, 0, 0), - F(48000000, P_GPLL2_OUT_MAIN, 12, 1, 2), - F(96000000, P_GPLL2_OUT_MAIN, 6, 1, 2), + F(24000000, P_GPLL2_OUT_MAIN, 12, 1, 2), + F(48000000, P_GPLL2_OUT_MAIN, 12, 0, 0), + F(96000000, P_GPLL2_OUT_MAIN, 6, 0, 0), F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0), - F(192000000, P_GPLL2_OUT_MAIN, 6, 0, 0), + F(192000000, P_GPLL2_OUT_MAIN, 3, 0, 0), F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), { } }; diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c index 855a61966f3e..8f29ecc74c50 100644 --- a/drivers/clk/qcom/gcc-msm8953.c +++ b/drivers/clk/qcom/gcc-msm8953.c @@ -3770,7 +3770,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
static struct clk_branch gcc_venus0_core0_vcodec0_clk = { .halt_reg = 0x4c02c, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x4c02c, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c index 9dd5c48f33be..fa1672c4e7d8 100644 --- a/drivers/clk/qcom/gcc-sm8650.c +++ b/drivers/clk/qcom/gcc-sm8650.c @@ -3497,7 +3497,7 @@ static struct gdsc usb30_prim_gdsc = { .pd = { .name = "usb30_prim_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, };
@@ -3506,7 +3506,7 @@ static struct gdsc usb3_phy_gdsc = { .pd = { .name = "usb3_phy_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, };
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index 7288af845434..009f39139b64 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -2564,19 +2564,6 @@ static struct clk_branch gcc_disp_hf_axi_clk = { }, };
-static struct clk_branch gcc_disp_xo_clk = { - .halt_reg = 0x27018, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x27018, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_disp_xo_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_gp1_clk = { .halt_reg = 0x64000, .halt_check = BRANCH_HALT, @@ -2631,21 +2618,6 @@ static struct clk_branch gcc_gp3_clk = { }, };
-static struct clk_branch gcc_gpu_cfg_ahb_clk = { - .halt_reg = 0x71004, - .halt_check = BRANCH_HALT_VOTED, - .hwcg_reg = 0x71004, - .hwcg_bit = 1, - .clkr = { - .enable_reg = 0x71004, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_gpu_cfg_ahb_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_gpu_gpll0_cph_clk_src = { .halt_check = BRANCH_HALT_DELAY, .clkr = { @@ -6268,7 +6240,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = { [GCC_CNOC_PCIE_TUNNEL_CLK] = &gcc_cnoc_pcie_tunnel_clk.clkr, [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, - [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr, [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, @@ -6281,7 +6252,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = { [GCC_GPLL7] = &gcc_gpll7.clkr, [GCC_GPLL8] = &gcc_gpll8.clkr, [GCC_GPLL9] = &gcc_gpll9.clkr, - [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr, [GCC_GPU_GPLL0_CPH_CLK_SRC] = &gcc_gpu_gpll0_cph_clk_src.clkr, [GCC_GPU_GPLL0_DIV_CPH_CLK_SRC] = &gcc_gpu_gpll0_div_cph_clk_src.clkr, [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c index 98ba5b4518fb..b9f02d91004e 100644 --- a/drivers/clk/qcom/mmcc-sdm660.c +++ b/drivers/clk/qcom/mmcc-sdm660.c @@ -2544,7 +2544,7 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_subcore0_clk = { .halt_reg = 0x1048, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x1048, .enable_mask = BIT(0), diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c index 0e7e3bf05b52..cb63d397429f 100644 --- a/drivers/clk/renesas/r9a08g045-cpg.c +++ b/drivers/clk/renesas/r9a08g045-cpg.c @@ -51,7 +51,7 @@ #define G3S_SEL_SDHI2 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 8, 2)
/* PLL 1/4/6 configuration registers macro. */ -#define G3S_PLL146_CONF(clk1, clk2) ((clk1) << 22 | (clk2) << 12) +#define G3S_PLL146_CONF(clk1, clk2, setting) ((clk1) << 22 | (clk2) << 12 | (setting))
#define DEF_G3S_MUX(_name, _id, _conf, _parent_names, _mux_flags, _clk_flags) \ DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = (_conf), \ @@ -134,7 +134,8 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
/* Internal Core Clocks */ DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000), - DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8)), + DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8, 0x100), + 1100000000UL), DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3), DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3), DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 100, 3), diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index ddf722ca79eb..4bd8862dc82b 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -51,6 +51,7 @@ #define RZG3S_DIV_M GENMASK(25, 22) #define RZG3S_DIV_NI GENMASK(21, 13) #define RZG3S_DIV_NF GENMASK(12, 1) +#define RZG3S_SEL_PLL BIT(0)
#define CLK_ON_R(reg) (reg) #define CLK_MON_R(reg) (0x180 + (reg)) @@ -60,6 +61,7 @@ #define GET_REG_OFFSET(val) ((val >> 20) & 0xfff) #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff) +#define GET_REG_SAMPLL_SETTING(val) ((val) & 0xfff)
#define CPG_WEN_BIT BIT(16)
@@ -943,6 +945,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
struct pll_clk { struct clk_hw hw; + unsigned long default_rate; unsigned int conf; unsigned int type; void __iomem *base; @@ -980,12 +983,19 @@ static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw, { struct pll_clk *pll_clk = to_pll(hw); struct rzg2l_cpg_priv *priv = pll_clk->priv; - u32 nir, nfr, mr, pr, val; + u32 nir, nfr, mr, pr, val, setting; u64 rate;
if (pll_clk->type != CLK_TYPE_G3S_PLL) return parent_rate;
+ setting = GET_REG_SAMPLL_SETTING(pll_clk->conf); + if (setting) { + val = readl(priv->base + setting); + if (val & RZG3S_SEL_PLL) + return pll_clk->default_rate; + } + val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
pr = 1 << FIELD_GET(RZG3S_DIV_P, val); @@ -1038,6 +1048,7 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core, pll_clk->base = priv->base; pll_clk->priv = priv; pll_clk->type = core->type; + pll_clk->default_rate = core->default_rate;
ret = devm_clk_hw_register(dev, &pll_clk->hw); if (ret) diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h index 881a89b5a710..b74c94a16986 100644 --- a/drivers/clk/renesas/rzg2l-cpg.h +++ b/drivers/clk/renesas/rzg2l-cpg.h @@ -102,7 +102,10 @@ struct cpg_core_clk { const struct clk_div_table *dtable; const u32 *mtable; const unsigned long invalid_rate; - const unsigned long max_rate; + union { + const unsigned long max_rate; + const unsigned long default_rate; + }; const char * const *parent_names; notifier_fn_t notifier; u32 flag; @@ -144,8 +147,9 @@ enum clk_types { DEF_TYPE(_name, _id, _type, .parent = _parent) #define DEF_SAMPLL(_name, _id, _parent, _conf) \ DEF_TYPE(_name, _id, CLK_TYPE_SAM_PLL, .parent = _parent, .conf = _conf) -#define DEF_G3S_PLL(_name, _id, _parent, _conf) \ - DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf) +#define DEF_G3S_PLL(_name, _id, _parent, _conf, _default_rate) \ + DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf, \ + .default_rate = _default_rate) #define DEF_INPUT(_name, _id) \ DEF_TYPE(_name, _id, CLK_TYPE_IN) #define DEF_FIXED(_name, _id, _parent, _mult, _div) \ diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 3bb87b27b662..cf60fcf2fa5c 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri", "gpll_peri", "hdmiphy_peri" }; PNAME(mux_ref_usb3otg_src_p) = { "xin24m", - "clk_usb3otg_ref" }; + "clk_ref_usb3otg_src" }; PNAME(mux_xin24m_32k_p) = { "xin24m", "clk_rtc32k" }; PNAME(mux_mac2io_src_p) = { "clk_mac2io_src", diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index 283c523763e6..8d440cf56bd4 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -74,12 +74,12 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev, if (!ctx) panic("could not allocate clock provider context.\n");
+ ctx->clk_data.num = nr_clks; for (i = 0; i < nr_clks; ++i) ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->dev = dev; ctx->reg_base = base; - ctx->clk_data.num = nr_clks; spin_lock_init(&ctx->lock);
return ctx; diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 9e46960f6a86..4f9cb943d945 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -254,7 +254,7 @@ config ARM_TEGRA186_CPUFREQ
config ARM_TEGRA194_CPUFREQ tristate "Tegra194 CPUFreq support" - depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST) + depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST) depends on TEGRA_BPMP default y help diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h index 8d692415d905..f457d4af2c62 100644 --- a/drivers/cpufreq/amd-pstate-trace.h +++ b/drivers/cpufreq/amd-pstate-trace.h @@ -24,9 +24,9 @@
TRACE_EVENT(amd_pstate_perf,
- TP_PROTO(unsigned long min_perf, - unsigned long target_perf, - unsigned long capacity, + TP_PROTO(u8 min_perf, + u8 target_perf, + u8 capacity, u64 freq, u64 mperf, u64 aperf, @@ -47,9 +47,9 @@ TRACE_EVENT(amd_pstate_perf, ),
TP_STRUCT__entry( - __field(unsigned long, min_perf) - __field(unsigned long, target_perf) - __field(unsigned long, capacity) + __field(u8, min_perf) + __field(u8, target_perf) + __field(u8, capacity) __field(unsigned long long, freq) __field(unsigned long long, mperf) __field(unsigned long long, aperf) @@ -70,10 +70,10 @@ TRACE_EVENT(amd_pstate_perf, __entry->fast_switch = fast_switch; ),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s", - (unsigned long)__entry->min_perf, - (unsigned long)__entry->target_perf, - (unsigned long)__entry->capacity, + TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s", + (u8)__entry->min_perf, + (u8)__entry->target_perf, + (u8)__entry->capacity, (unsigned long long)__entry->freq, (unsigned long long)__entry->mperf, (unsigned long long)__entry->aperf, @@ -86,10 +86,10 @@ TRACE_EVENT(amd_pstate_perf, TRACE_EVENT(amd_pstate_epp_perf,
TP_PROTO(unsigned int cpu_id, - unsigned int highest_perf, - unsigned int epp, - unsigned int min_perf, - unsigned int max_perf, + u8 highest_perf, + u8 epp, + u8 min_perf, + u8 max_perf, bool boost ),
@@ -102,10 +102,10 @@ TRACE_EVENT(amd_pstate_epp_perf,
TP_STRUCT__entry( __field(unsigned int, cpu_id) - __field(unsigned int, highest_perf) - __field(unsigned int, epp) - __field(unsigned int, min_perf) - __field(unsigned int, max_perf) + __field(u8, highest_perf) + __field(u8, epp) + __field(u8, min_perf) + __field(u8, max_perf) __field(bool, boost) ),
@@ -118,12 +118,12 @@ TRACE_EVENT(amd_pstate_epp_perf, __entry->boost = boost; ),
- TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u", + TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u", (unsigned int)__entry->cpu_id, - (unsigned int)__entry->min_perf, - (unsigned int)__entry->max_perf, - (unsigned int)__entry->highest_perf, - (unsigned int)__entry->epp, + (u8)__entry->min_perf, + (u8)__entry->max_perf, + (u8)__entry->highest_perf, + (u8)__entry->epp, (bool)__entry->boost ) ); diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 313550fa62d4..bd63837eabb4 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -186,7 +186,7 @@ static inline int get_mode_idx_from_str(const char *str, size_t size) static DEFINE_MUTEX(amd_pstate_limits_lock); static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 msr_get_epp(struct amd_cpudata *cpudata) +static u8 msr_get_epp(struct amd_cpudata *cpudata) { u64 value; int ret; @@ -207,7 +207,7 @@ static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata) return static_call(amd_pstate_get_epp)(cpudata); }
-static s16 shmem_get_epp(struct amd_cpudata *cpudata) +static u8 shmem_get_epp(struct amd_cpudata *cpudata) { u64 epp; int ret; @@ -218,11 +218,11 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata) return ret; }
- return (s16)(epp & 0xff); + return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp); }
-static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf, - u32 des_perf, u32 max_perf, u32 epp, bool fast_switch) +static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf, + u8 des_perf, u8 max_perf, u8 epp, bool fast_switch) { u64 value, prev;
@@ -257,15 +257,15 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf, DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata, - u32 min_perf, u32 des_perf, - u32 max_perf, u32 epp, + u8 min_perf, u8 des_perf, + u8 max_perf, u8 epp, bool fast_switch) { return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, max_perf, epp, fast_switch); }
-static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp) +static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp) { u64 value, prev; int ret; @@ -292,12 +292,12 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) +static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u8 epp) { return static_call(amd_pstate_set_epp)(cpudata, epp); }
-static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp) +static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp) { int ret; struct cppc_perf_ctrls perf_ctrls; @@ -320,7 +320,7 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy, int pref_index) { struct amd_cpudata *cpudata = policy->driver_data; - int epp; + u8 epp;
if (!pref_index) epp = cpudata->epp_default; @@ -479,8 +479,8 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) return static_call(amd_pstate_init_perf)(cpudata); }
-static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf, - u32 des_perf, u32 max_perf, u32 epp, bool fast_switch) +static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf, + u8 des_perf, u8 max_perf, u8 epp, bool fast_switch) { struct cppc_perf_ctrls perf_ctrls;
@@ -531,14 +531,17 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) return true; }
-static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, - u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags) +static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf, + u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags) { unsigned long max_freq; struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu); - u32 nominal_perf = READ_ONCE(cpudata->nominal_perf); + u8 nominal_perf = READ_ONCE(cpudata->nominal_perf);
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); + if (!policy) + return; + + des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
max_freq = READ_ONCE(cpudata->max_limit_freq); policy->cur = div_u64(des_perf * max_freq, max_perf); @@ -550,7 +553,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
/* limit the max perf when core performance boost feature is disabled */ if (!cpudata->boost_supported) - max_perf = min_t(unsigned long, nominal_perf, max_perf); + max_perf = min_t(u8, nominal_perf, max_perf);
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) { trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq, @@ -591,7 +594,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) { - u32 max_limit_perf, min_limit_perf, max_perf, max_freq; + u8 max_limit_perf, min_limit_perf, max_perf; + u32 max_freq; struct amd_cpudata *cpudata = policy->driver_data;
max_perf = READ_ONCE(cpudata->highest_perf); @@ -615,7 +619,7 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy, { struct cpufreq_freqs freqs; struct amd_cpudata *cpudata = policy->driver_data; - unsigned long max_perf, min_perf, des_perf, cap_perf; + u8 des_perf, cap_perf;
if (!cpudata->max_freq) return -ENODEV; @@ -624,8 +628,6 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy, amd_pstate_update_min_max_limit(policy);
cap_perf = READ_ONCE(cpudata->highest_perf); - min_perf = READ_ONCE(cpudata->lowest_perf); - max_perf = cap_perf;
freqs.old = policy->cur; freqs.new = target_freq; @@ -642,8 +644,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy, if (!fast_switch) cpufreq_freq_transition_begin(policy, &freqs);
- amd_pstate_update(cpudata, min_perf, des_perf, - max_perf, fast_switch, policy->governor->flags); + amd_pstate_update(cpudata, cpudata->min_limit_perf, des_perf, + cpudata->max_limit_perf, fast_switch, + policy->governor->flags);
if (!fast_switch) cpufreq_freq_transition_end(policy, &freqs, false); @@ -671,8 +674,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu, unsigned long target_perf, unsigned long capacity) { - unsigned long max_perf, min_perf, des_perf, - cap_perf, lowest_nonlinear_perf; + u8 max_perf, min_perf, des_perf, cap_perf, min_limit_perf; struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); struct amd_cpudata *cpudata;
@@ -684,20 +686,20 @@ static void amd_pstate_adjust_perf(unsigned int cpu, if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) amd_pstate_update_min_max_limit(policy);
- cap_perf = READ_ONCE(cpudata->highest_perf); - lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); + min_limit_perf = READ_ONCE(cpudata->min_limit_perf);
des_perf = cap_perf; if (target_perf < capacity) des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->lowest_perf); if (_min_perf < capacity) min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity); + else + min_perf = cap_perf;
- if (min_perf < lowest_nonlinear_perf) - min_perf = lowest_nonlinear_perf; + if (min_perf < min_limit_perf) + min_perf = min_limit_perf;
max_perf = cpudata->max_limit_perf; if (max_perf < min_perf) @@ -908,8 +910,8 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata) { int ret; u32 min_freq, max_freq; - u32 highest_perf, nominal_perf, nominal_freq; - u32 lowest_nonlinear_perf, lowest_nonlinear_freq; + u8 highest_perf, nominal_perf, lowest_nonlinear_perf; + u32 nominal_freq, lowest_nonlinear_freq; struct cppc_perf_caps cppc_perf;
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); @@ -1116,7 +1118,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy, char *buf) { - u32 perf; + u8 perf; struct amd_cpudata *cpudata = policy->driver_data;
perf = READ_ONCE(cpudata->highest_perf); @@ -1127,7 +1129,7 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy, static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy, char *buf) { - u32 perf; + u8 perf; struct amd_cpudata *cpudata = policy->driver_data;
perf = READ_ONCE(cpudata->prefcore_ranking); @@ -1190,7 +1192,7 @@ static ssize_t show_energy_performance_preference( struct cpufreq_policy *policy, char *buf) { struct amd_cpudata *cpudata = policy->driver_data; - int preference; + u8 preference;
switch (cpudata->epp_cached) { case AMD_CPPC_EPP_PERFORMANCE: @@ -1552,7 +1554,7 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; - u32 epp; + u8 epp;
amd_pstate_update_min_max_limit(policy);
@@ -1601,7 +1603,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) static int amd_pstate_epp_reenable(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; - u64 max_perf; + u8 max_perf; int ret;
ret = amd_pstate_cppc_enable(true); @@ -1638,7 +1640,7 @@ static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy) static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy) { struct amd_cpudata *cpudata = policy->driver_data; - int min_perf; + u8 min_perf;
if (cpudata->suspended) return 0; diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h index 9747e3be6cee..19d405c6d805 100644 --- a/drivers/cpufreq/amd-pstate.h +++ b/drivers/cpufreq/amd-pstate.h @@ -70,13 +70,13 @@ struct amd_cpudata { struct freq_qos_request req[2]; u64 cppc_req_cached;
- u32 highest_perf; - u32 nominal_perf; - u32 lowest_nonlinear_perf; - u32 lowest_perf; - u32 prefcore_ranking; - u32 min_limit_perf; - u32 max_limit_perf; + u8 highest_perf; + u8 nominal_perf; + u8 lowest_nonlinear_perf; + u8 lowest_perf; + u8 prefcore_ranking; + u8 min_limit_perf; + u8 max_limit_perf; u32 min_limit_freq; u32 max_limit_freq;
@@ -93,11 +93,11 @@ struct amd_cpudata { bool hw_prefcore;
/* EPP feature related attributes*/ - s16 epp_cached; + u8 epp_cached; u32 policy; u64 cppc_cap1_cached; bool suspended; - s16 epp_default; + u8 epp_default; };
/* diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index 7a979db81f09..5a3545bd0d8d 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -47,7 +47,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk, { int cpu;
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { struct device *cpu_dev; struct clk *clk;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 3a7c3372bda7..f3913eea5e55 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -303,7 +303,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev) int ret, cpu;
/* Request resources early so we can return in case of -EPROBE_DEFER */ - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { ret = dt_cpufreq_early_init(&pdev->dev, cpu); if (ret) goto err; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index af44ee6a6430..1a7fcaf39cc9 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy) time_elapsed = update_time - j_cdbs->prev_update_time; j_cdbs->prev_update_time = update_time;
-		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+		/*
+		 * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
+		 * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
+		 * off, where idle_time is calculated by the difference between
+		 * time elapsed in jiffies and "busy time" obtained from CPU
+		 * statistics. If a CPU is 100% busy, the time elapsed and busy
+		 * time should grow with the same amount in two consecutive
+		 * samples, but in practice there could be a tiny difference,
+		 * making the accumulated idle time decrease sometimes. Hence,
+		 * in this case, idle_time should be regarded as 0 in order to
+		 * make the further process correct.
+		 */
+		if (cur_idle_time > j_cdbs->prev_cpu_idle)
+			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+		else
+			idle_time = 0;
+
 		j_cdbs->prev_cpu_idle = cur_idle_time;
 		if (ignore_nice) {
@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 			 * calls, so the previous load value can be used then.
 			 */
 			load = j_cdbs->prev_load;
-		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
+		} else if (unlikely(idle_time > 2 * sampling_rate &&
 				    j_cdbs->prev_load)) {
 			/*
 			 * If the CPU had gone completely idle and a task has
@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 			load = j_cdbs->prev_load;
 			j_cdbs->prev_load = 0;
 		} else {
-			if (time_elapsed >= idle_time) {
+			if (time_elapsed > idle_time)
 				load = 100 * (time_elapsed - idle_time) / time_elapsed;
-			} else {
-				/*
-				 * That can happen if idle_time is returned by
-				 * get_cpu_idle_time_jiffy(). In that case
-				 * idle_time is roughly equal to the difference
-				 * between time_elapsed and "busy time" obtained
-				 * from CPU statistics. Then, the "busy time"
-				 * can end up being greater than time_elapsed
-				 * (for example, if jiffies_64 and the CPU
-				 * statistics are updated by different CPUs),
-				 * so idle_time may in fact be negative. That
-				 * means, though, that the CPU was busy all
-				 * the time (on the rough average) during the
-				 * last sampling interval and 100 can be
-				 * returned as the load.
-				 */
-				load = (int)idle_time < 0 ? 100 : 0;
-			}
+			else
+				load = 0;
+
 			j_cdbs->prev_load = load;
 		}
-		if (unlikely((int)idle_time > 2 * sampling_rate)) {
+		if (unlikely(idle_time > 2 * sampling_rate)) {
 			unsigned int periods = idle_time / sampling_rate;
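The rework above drops the old trick of casting the unsigned idle_time to int to detect underflow: the subtraction is now clamped where it happens, so a sample in which the reported busy time outgrew the elapsed time yields idle_time == 0 and therefore load == 100, while idle_time >= time_elapsed simply means the window was fully idle and yields load == 0. A worked example with made-up numbers, not from the patch:

	unsigned int time_elapsed = 20, idle_time = 5, load;

	if (time_elapsed > idle_time)
		load = 100 * (time_elapsed - idle_time) / time_elapsed;	/* 100 * 15 / 20 = 75 */
	else
		load = 0;	/* the CPU was idle for the whole sampling window */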
if (periods < idle_periods) diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index 9252ebd60373..478257523cc3 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -304,7 +304,7 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) struct regulator *cpu_reg;
/* Make sure that all CPU supplies are available before proceeding. */ - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { cpu_dev = get_cpu_device(cpu); if (!cpu_dev) return dev_err_probe(&pdev->dev, -EPROBE_DEFER, diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 663f61565cf7..2e4f9ca0af35 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -632,7 +632,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, -ENODEV, "failed to get mtk cpufreq platform data\n");
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { info = mtk_cpu_dvfs_info_lookup(cpu); if (info) continue; diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c index 7f3cfe668f30..2aad4c04673c 100644 --- a/drivers/cpufreq/mvebu-cpufreq.c +++ b/drivers/cpufreq/mvebu-cpufreq.c @@ -56,7 +56,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void) * it), and registers the clock notifier that will take care * of doing the PMSU part of a frequency transition. */ - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { struct device *cpu_dev; struct clk *clk; int ret; diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index b2e7e89feaac..dce7cad1813f 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -306,7 +306,7 @@ static void qcom_get_related_cpus(int index, struct cpumask *m) struct of_phandle_args args; int cpu, ret;
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { cpu_np = of_cpu_device_node_get(cpu); if (!cpu_np) continue; diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 3a8ed723a23e..54f8117103c8 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -489,7 +489,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) nvmem_cell_put(speedbin_nvmem); }
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { struct dev_pm_opp_config config = { .supported_hw = NULL, }; @@ -543,7 +543,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) dev_err(cpu_dev, "Failed to register platform device\n");
free_opp: - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { dev_pm_domain_detach_list(drv->cpus[cpu].pd_list); dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); } @@ -557,7 +557,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { dev_pm_domain_detach_list(drv->cpus[cpu].pd_list); dev_pm_opp_clear_config(drv->cpus[cpu].opp_token); } @@ -568,7 +568,7 @@ static int qcom_cpufreq_suspend(struct device *dev) struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev); unsigned int cpu;
- for_each_possible_cpu(cpu) + for_each_present_cpu(cpu) qcom_cpufreq_suspend_pd_devs(drv, cpu);
return 0; diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index b8fe758aeb01..914bf2c940a0 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -104,7 +104,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, int domain, int cpu, tdomain; struct device *tcpu_dev;
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { if (cpu == cpu_dev->id) continue;
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index cd89c1b9832c..1f97b949763f 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -39,8 +39,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) static int scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { - u64 rate = policy->freq_table[index].frequency * 1000; + unsigned long freq_khz = policy->freq_table[index].frequency; struct scpi_data *priv = policy->driver_data; + unsigned long rate = freq_khz * 1000; int ret;
ret = clk_set_rate(priv->clk, rate); @@ -48,7 +49,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) if (ret) return ret;
- if (clk_get_rate(priv->clk) != rate) + if (clk_get_rate(priv->clk) / 1000 != freq_khz) return -EIO;
return 0; @@ -64,7 +65,7 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) if (domain < 0) return domain;
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { if (cpu == cpu_dev->id) continue;
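The scpi-cpufreq change a few hunks up reflects the units split between cpufreq (kHz) and the clk API (Hz): a clock provider may round the requested rate slightly, so requiring an exact Hz match could report -EIO even though the requested frequency was effectively reached. A self-contained sketch of the same idea (the helper name is invented for the example):

	#include <linux/clk.h>
	#include <linux/errno.h>

	/* request freq_khz and verify the result at kHz granularity only */
	static int example_set_rate_khz(struct clk *clk, unsigned int freq_khz)
	{
		unsigned long rate = (unsigned long)freq_khz * 1000;	/* Hz for the clk API */
		int ret = clk_set_rate(clk, rate);

		if (ret)
			return ret;

		/* tolerate sub-kHz rounding by the clock provider */
		if (clk_get_rate(clk) / 1000 != freq_khz)
			return -EIO;

		return 0;
	}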
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 17d6a149f580..47d6840b3489 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -262,7 +262,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) snprintf(name, sizeof(name), "speed%d", speed); config.prop_name = name;
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { struct device *cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) { @@ -288,7 +288,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) pr_err("Failed to register platform device\n");
free_opp: - for_each_possible_cpu(cpu) + for_each_present_cpu(cpu) dev_pm_opp_clear_config(opp_tokens[cpu]); kfree(opp_tokens);
@@ -302,7 +302,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) + for_each_present_cpu(cpu) dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens); diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c index a050b3a6737f..272dc3c85106 100644 --- a/drivers/cpufreq/virtual-cpufreq.c +++ b/drivers/cpufreq/virtual-cpufreq.c @@ -138,7 +138,7 @@ static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy) cur_perf_domain = readl_relaxed(base + policy->cpu * PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { cpu_dev = get_cpu_device(cpu); if (!cpu_dev) continue; diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index caba6f4bb1b7..e044fefdb816 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -137,9 +137,9 @@ static int __init arm_idle_init_cpu(int cpu) /* * arm_idle_init - Initializes arm cpuidle driver * - * Initializes arm cpuidle driver for all CPUs, if any CPU fails - * to register cpuidle driver then rollback to cancel all CPUs - * registration. + * Initializes arm cpuidle driver for all present CPUs, if any + * CPU fails to register cpuidle driver then rollback to cancel + * all CPUs registration. */ static int __init arm_idle_init(void) { @@ -147,7 +147,7 @@ static int __init arm_idle_init(void) struct cpuidle_driver *drv; struct cpuidle_device *dev;
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { ret = arm_idle_init_cpu(cpu); if (ret) goto out_fail; diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index 74972deda0ea..4abba42fcc31 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c @@ -148,7 +148,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id) if (!cpumask) return -ENOMEM;
- for_each_possible_cpu(cpu) + for_each_present_cpu(cpu) if (smp_cpuid_part(cpu) == part_id) cpumask_set_cpu(cpu, cpumask);
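A number of cpufreq and cpuidle probe paths in this release switch from for_each_possible_cpu() to for_each_present_cpu(): cpu_possible_mask covers every CPU id that could ever be brought up, while cpu_present_mask covers the CPUs that are actually populated, so the setup loops now only touch hardware that exists. A minimal sketch of the resulting pattern (a hypothetical helper, not taken from any one of the drivers):

	#include <linux/cpu.h>
	#include <linux/errno.h>

	static int example_init_present_cpus(void)
	{
		int cpu;

		for_each_present_cpu(cpu) {
			struct device *cpu_dev = get_cpu_device(cpu);

			if (!cpu_dev)
				return -EPROBE_DEFER;	/* CPU device not registered yet */

			/* set up clocks, OPPs or idle states for this CPU only */
		}

		return 0;
	}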
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index 2562dc001fc1..a4594c3d6562 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -400,7 +400,7 @@ static int psci_idle_init_cpu(struct device *dev, int cpu) /* * psci_idle_probe - Initializes PSCI cpuidle driver * - * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails + * Initializes PSCI cpuidle driver for all present CPUs, if any CPU fails * to register cpuidle driver then rollback to cancel all CPUs * registration. */ @@ -410,7 +410,7 @@ static int psci_cpuidle_probe(struct platform_device *pdev) struct cpuidle_driver *drv; struct cpuidle_device *dev;
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { ret = psci_idle_init_cpu(&pdev->dev, cpu); if (ret) goto out_fail; diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c index 3ab240e0e122..5f386761b156 100644 --- a/drivers/cpuidle/cpuidle-qcom-spm.c +++ b/drivers/cpuidle/cpuidle-qcom-spm.c @@ -135,7 +135,7 @@ static int spm_cpuidle_drv_probe(struct platform_device *pdev) if (ret) return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed");
- for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { ret = spm_cpuidle_register(&pdev->dev, cpu); if (ret && ret != -ENODEV) { dev_err(&pdev->dev, diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index 0c92a628bbd4..0fe1ece9fbdc 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -529,8 +529,8 @@ static int sbi_cpuidle_probe(struct platform_device *pdev) return ret; }
- /* Initialize CPU idle driver for each CPU */ - for_each_possible_cpu(cpu) { + /* Initialize CPU idle driver for each present CPU */ + for_each_present_cpu(cpu) { ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu); if (ret) { pr_debug("HART%ld: idle driver init failed\n", diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 4b9970230822..703920b49c7c 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -37,7 +37,6 @@ struct sec_aead_req { u8 *a_ivin; dma_addr_t a_ivin_dma; struct aead_request *aead_req; - bool fallback; };
/* SEC request of Crypto */ diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 66bc07da9eb6..8ea5305bc320 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -57,7 +57,6 @@ #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 #define SEC_ICV_MASK 0x000E -#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 @@ -80,16 +79,16 @@ #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ SEC_PBUF_LEFT_SZ(depth))
-#define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 #define SEC_ICV_ERR 0x2 -#define MIN_MAC_LEN 4 #define MAC_LEN_MASK 0x1U #define MAX_INPUT_DATA_LEN 0xFFFE00 #define BITS_MASK 0xFF +#define WORD_MASK 0x3 #define BYTE_BITS 0x8 +#define BYTES_TO_WORDS(bcount) ((bcount) >> 2) #define SEC_XTS_NAME_SZ 0x3 #define IV_CM_CAL_NUM 2 #define IV_CL_MASK 0x7 @@ -691,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
c_ctx->fallback = false;
- /* Currently, only XTS mode need fallback tfm when using 192bit key */ - if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) - return 0; - c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(c_ctx->fbtfm)) { - pr_err("failed to alloc xts mode fallback tfm!\n"); + pr_err("failed to alloc fallback tfm for %s!\n", alg); return PTR_ERR(c_ctx->fbtfm); }
@@ -858,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, }
memcpy(c_ctx->c_key, key, keylen); - if (c_ctx->fallback && c_ctx->fbtfm) { + if (c_ctx->fbtfm) { ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); if (ret) { dev_err(dev, "failed to set fallback skcipher key!\n"); @@ -1090,11 +1085,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_shash *hash_tfm = ctx->hash_tfm; int blocksize, digestsize, ret;
- if (!keys->authkeylen) { - pr_err("hisi_sec2: aead auth key error!\n"); - return -EINVAL; - } - blocksize = crypto_shash_blocksize(hash_tfm); digestsize = crypto_shash_digestsize(hash_tfm); if (keys->authkeylen > blocksize) { @@ -1106,7 +1096,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, } ctx->a_key_len = digestsize; } else { - memcpy(ctx->a_key, keys->authkey, keys->authkeylen); + if (keys->authkeylen) + memcpy(ctx->a_key, keys->authkey, keys->authkeylen); ctx->a_key_len = keys->authkeylen; }
@@ -1160,8 +1151,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, }
ret = crypto_authenc_extractkeys(&keys, key, keylen); - if (ret) + if (ret) { + dev_err(dev, "sec extract aead keys err!\n"); goto bad_key; + }
ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { @@ -1175,12 +1168,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; }
- if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) { - ret = -EINVAL; - dev_err(dev, "AUTH key length error!\n"); - goto bad_key; - } - ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); if (ret) { dev_err(dev, "set sec fallback key err!\n"); @@ -1583,11 +1570,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE); + sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
sec_sqe->type2.mac_key_alg |= - cpu_to_le32((u32)((ctx->a_key_len) / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); @@ -1639,12 +1625,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |= - cpu_to_le32((u32)(authsize / - SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); + cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |= - cpu_to_le32((u32)(ctx->a_key_len / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); @@ -2003,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); }
-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, - struct sec_req *sreq) +static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq) { u32 cryptlen = sreq->c_req.sk_req->cryptlen; struct device *dev = ctx->dev; @@ -2026,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, } break; case SEC_CMODE_CTR: - if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { - dev_err(dev, "skcipher HW version error!\n"); - ret = -EINVAL; - } break; default: ret = -EINVAL; @@ -2038,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, return ret; }
-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_skcipher_param_check(struct sec_ctx *ctx, + struct sec_req *sreq, bool *need_fallback) { struct skcipher_request *sk_req = sreq->c_req.sk_req; struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!sk_req->src || !sk_req->dst || - sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { + if (unlikely(!sk_req->src || !sk_req->dst)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } + + if (sk_req->cryptlen > MAX_INPUT_DATA_LEN) + *need_fallback = true; + sreq->c_req.c_len = sk_req->cryptlen;
if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) @@ -2106,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); struct sec_req *req = skcipher_request_ctx(sk_req); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + bool need_fallback = false; int ret;
if (!sk_req->cryptlen) { @@ -2119,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) req->c_req.encrypt = encrypt; req->ctx = ctx;
- ret = sec_skcipher_param_check(ctx, req); + ret = sec_skcipher_param_check(ctx, req, &need_fallback); if (unlikely(ret)) return -EINVAL;
- if (unlikely(ctx->c_ctx.fallback)) + if (unlikely(ctx->c_ctx.fallback || need_fallback)) return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
return ctx->req_op->process(ctx, req); @@ -2231,52 +2215,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t sz = crypto_aead_authsize(tfm); u8 c_mode = ctx->c_ctx.c_mode; - struct device *dev = ctx->dev; int ret;
- /* Hardware does not handle cases where authsize is less than 4 bytes */ - if (unlikely(sz < MIN_MAC_LEN)) { - sreq->aead_req.fallback = true; + if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len)) return -EINVAL; - }
if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || - req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(dev, "aead input spec error!\n"); + req->assoclen > SEC_MAX_AAD_LEN)) return -EINVAL; - }
if (c_mode == SEC_CMODE_CCM) { - if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { - dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); + if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) return -EINVAL; - } - ret = aead_iv_demension_check(req); - if (ret) { - dev_err(dev, "aead input iv param error!\n"); - return ret; - } - }
- if (sreq->c_req.encrypt) - sreq->c_req.c_len = req->cryptlen; - else - sreq->c_req.c_len = req->cryptlen - sz; - if (c_mode == SEC_CMODE_CBC) { - if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "aead crypto length error!\n"); + ret = aead_iv_demension_check(req); + if (unlikely(ret)) + return -EINVAL; + } else if (c_mode == SEC_CMODE_CBC) { + if (unlikely(sz & WORD_MASK)) + return -EINVAL; + if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK)) return -EINVAL; - } }
return 0; }
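In the hisi_sec2 hunks above, the hardware descriptor encodes MAC and authentication-key sizes as a count of 32-bit words, so the SEC_SQE_LEN_RATE division is replaced by a BYTES_TO_WORDS() shift, and CBC-mode AEAD now rejects an authsize or key length that is not word-aligned, since such a length cannot be represented in the word-based encoding. A small illustration with example values (the helper is hypothetical):

	#include <linux/errno.h>

	#define WORD_MASK		0x3
	#define BYTES_TO_WORDS(bcount)	((bcount) >> 2)

	/* e.g. a 20-byte MAC is encoded as BYTES_TO_WORDS(20) == 5 words */
	static int example_encode_mac_len(unsigned int authsize, unsigned int *words)
	{
		if (authsize & WORD_MASK)	/* not a whole number of 32-bit words */
			return -EINVAL;

		*words = BYTES_TO_WORDS(authsize);
		return 0;
	}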
-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback) { struct aead_request *req = sreq->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - size_t authsize = crypto_aead_authsize(tfm); struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg;
@@ -2285,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; }
- if (ctx->sec->qm.ver == QM_HW_V2) { - if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && - req->cryptlen <= authsize))) { - sreq->aead_req.fallback = true; - return -EINVAL; - } + if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC && + sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "aead cbc mode input data length error!\n"); + return -EINVAL; }
/* Support AES or SM4 */ @@ -2299,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; }
- if (unlikely(sec_aead_spec_check(ctx, sreq))) + if (unlikely(sec_aead_spec_check(ctx, sreq))) { + *need_fallback = true; return -EINVAL; + }
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ) @@ -2344,17 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); struct sec_req *req = aead_request_ctx(a_req); struct sec_ctx *ctx = crypto_aead_ctx(tfm); + size_t sz = crypto_aead_authsize(tfm); + bool need_fallback = false; int ret;
req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; - req->aead_req.fallback = false; + req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
- ret = sec_aead_param_check(ctx, req); + ret = sec_aead_param_check(ctx, req, &need_fallback); if (unlikely(ret)) { - if (req->aead_req.fallback) + if (need_fallback) return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; } diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index c3776b0de51d..990ea46955bb 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1537,7 +1537,7 @@ static int iaa_comp_acompress(struct acomp_req *req) iaa_wq = idxd_wq_get_private(wq);
if (!req->dst) { - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
/* incompressible data will always be < 2 * slen */ req->dlen = 2 * req->slen; @@ -1619,7 +1619,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) { - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct crypto_tfm *tfm = req->base.tfm; dma_addr_t src_addr, dst_addr; diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 9faef33e54bd..a17adc4beda2 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -420,6 +420,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK; dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 2dd3772bf58a..0f7f00a19e7d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -695,7 +695,7 @@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, if (err_mask->parerr_wat_wcp_mask) adf_poll_slicehang_csr(accel_dev, csr, ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, - "ath_cph"); + "wat_wcp");
return false; } @@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, return reset_required; }
-static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, +static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); - u32 reg; - if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) - return false; - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); - reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); - reg &= err_mask->parerr_ath_cph_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); - reg &= err_mask->parerr_cpr_xlt_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); - reg &= err_mask->parerr_dcpr_ucs_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); - reg &= err_mask->parerr_pke_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); - } - - if (err_mask->parerr_wat_wcp_mask) { - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); - reg &= err_mask->parerr_wat_wcp_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, - reg); - } - } + return;
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported");
- return false; + return; }
static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, @@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); - reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm);
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 1660c5cf3641..56129bdf53ab 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -1145,6 +1145,7 @@ static void __init nxcop_get_capabilities(void) { struct hv_vas_all_caps *hv_caps; struct hv_nx_cop_caps *hv_nxc; + u64 feat; int rc;
hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL); @@ -1155,27 +1156,26 @@ static void __init nxcop_get_capabilities(void) */ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0, (u64)virt_to_phys(hv_caps)); + if (!rc) + feat = be64_to_cpu(hv_caps->feat_type); + kfree(hv_caps); if (rc) - goto out; + return; + if (!(feat & VAS_NX_GZIP_FEAT_BIT)) + return;
- caps_feat = be64_to_cpu(hv_caps->feat_type); /* * NX-GZIP feature available */ - if (caps_feat & VAS_NX_GZIP_FEAT_BIT) { - hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); - if (!hv_nxc) - goto out; - /* - * Get capabilities for NX-GZIP feature - */ - rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, - VAS_NX_GZIP_FEAT, - (u64)virt_to_phys(hv_nxc)); - } else { - pr_err("NX-GZIP feature is not available\n"); - rc = -EINVAL; - } + hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); + if (!hv_nxc) + return; + /* + * Get capabilities for NX-GZIP feature + */ + rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, + VAS_NX_GZIP_FEAT, + (u64)virt_to_phys(hv_nxc));
if (!rc) { nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor); @@ -1185,13 +1185,10 @@ static void __init nxcop_get_capabilities(void) be64_to_cpu(hv_nxc->min_compress_len); nx_cop_caps.min_decompress_len = be64_to_cpu(hv_nxc->min_decompress_len); - } else { - caps_feat = 0; + caps_feat = feat; }
kfree(hv_nxc); -out: - kfree(hv_caps); }
static const struct vio_device_id nx842_vio_driver_ids[] = { diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index d734c9a56786..ca9d0cca1f74 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -28,6 +28,9 @@ struct tegra_aes_ctx { u32 ivsize; u32 key1_id; u32 key2_id; + u32 keylen; + u8 key1[AES_MAX_KEY_SIZE]; + u8 key2[AES_MAX_KEY_SIZE]; };
struct tegra_aes_reqctx { @@ -43,8 +46,9 @@ struct tegra_aead_ctx { struct tegra_se *se; unsigned int authsize; u32 alg; - u32 keylen; u32 key_id; + u32 keylen; + u8 key[AES_MAX_KEY_SIZE]; };
struct tegra_aead_reqctx { @@ -56,8 +60,8 @@ struct tegra_aead_reqctx { unsigned int cryptlen; unsigned int authsize; bool encrypt; - u32 config; u32 crypto_config; + u32 config; u32 key_id; u32 iv[4]; u8 authdata[16]; @@ -67,6 +71,8 @@ struct tegra_cmac_ctx { struct tegra_se *se; unsigned int alg; u32 key_id; + u32 keylen; + u8 key[AES_MAX_KEY_SIZE]; struct crypto_shash *fallback_tfm; };
@@ -260,17 +266,13 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); struct tegra_se *se = ctx->se; - unsigned int cmdlen; + unsigned int cmdlen, key1_id, key2_id; int ret;
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - return -ENOMEM; - - rctx->datbuf.size = SE_AES_BUFLEN; rctx->iv = (u32 *)req->iv; rctx->len = req->cryptlen; + key1_id = ctx->key1_id; + key2_id = ctx->key2_id;
/* Pad input to AES Block size */ if (ctx->alg != SE_ALG_XTS) { @@ -278,20 +280,59 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE); }
+ rctx->datbuf.size = rctx->len; + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_finalize; + } + scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
+ rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt); + + if (!key1_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1, + ctx->keylen, ctx->alg, &key1_id); + if (ret) + goto out; + } + + rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id); + + if (ctx->alg == SE_ALG_XTS) { + if (!key2_id) { + ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2, + ctx->keylen, ctx->alg, &key2_id); + if (ret) + goto out; + } + + rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id); + } + /* Prepare the command and submit for execution */ cmdlen = tegra_aes_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */ tegra_aes_update_iv(req, ctx); scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
+out: /* Free the buffer */ - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr);
+ if (tegra_key_is_reserved(key1_id)) + tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg); + + if (tegra_key_is_reserved(key2_id)) + tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg); + +out_finalize: crypto_finalize_skcipher_request(se->engine, req, ret);
return 0; @@ -313,6 +354,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm) ctx->se = se_alg->se_dev; ctx->key1_id = 0; ctx->key2_id = 0; + ctx->keylen = 0;
algname = crypto_tfm_alg_name(&tfm->base); ret = se_algname_to_algid(algname); @@ -341,13 +383,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, u32 keylen) { struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret;
if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; }
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key1, key, keylen); + } + + return 0; }
static int tegra_xts_setkey(struct crypto_skcipher *tfm, @@ -365,11 +414,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm,
ret = tegra_key_submit(ctx->se, key, len, ctx->alg, &ctx->key1_id); - if (ret) - return ret; + if (ret) { + ctx->keylen = len; + memcpy(ctx->key1, key, len); + }
- return tegra_key_submit(ctx->se, key + len, len, + ret = tegra_key_submit(ctx->se, key + len, len, ctx->alg, &ctx->key2_id); + if (ret) { + ctx->keylen = len; + memcpy(ctx->key2, key + len, len); + }
return 0; } @@ -443,13 +498,10 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) if (!req->cryptlen) return 0;
- rctx->encrypt = encrypt; - rctx->config = tegra234_aes_cfg(ctx->alg, encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt); - rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id); + if (ctx->alg == SE_ALG_ECB) + req->iv = NULL;
- if (ctx->key2_id) - rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id); + rctx->encrypt = encrypt;
return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); } @@ -715,11 +767,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id);
cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); }
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) @@ -732,11 +784,11 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */ cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret;
@@ -755,11 +807,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret;
@@ -886,12 +938,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */ cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); }
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) @@ -1073,7 +1125,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id);
/* Copy authdata in the top of buffer for encryption/decryption */ if (rctx->encrypt) @@ -1098,7 +1150,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
/* Prepare command and submit */ cmdlen = tegra_ctr_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret;
@@ -1117,6 +1169,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se, rctx->assoclen = req->assoclen; rctx->authsize = crypto_aead_authsize(tfm);
+ if (rctx->encrypt) + rctx->cryptlen = req->cryptlen; + else + rctx->cryptlen = req->cryptlen - rctx->authsize; + memcpy(iv, req->iv, 16);
ret = tegra_ccm_check_iv(iv); @@ -1145,30 +1202,35 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret;
+ ret = tegra_ccm_crypt_init(req, se, rctx); + if (ret) + goto out_finalize; + + rctx->key_id = ctx->key_id; + /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, &rctx->inbuf.addr, GFP_KERNEL); if (!rctx->inbuf.buf) - return -ENOMEM; - - rctx->inbuf.size = SE_AES_BUFLEN; + goto out_finalize;
- rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, &rctx->outbuf.addr, GFP_KERNEL); if (!rctx->outbuf.buf) { ret = -ENOMEM; - goto outbuf_err; + goto out_free_inbuf; }
- rctx->outbuf.size = SE_AES_BUFLEN; - - ret = tegra_ccm_crypt_init(req, se, rctx); - if (ret) - goto out; + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + }
if (rctx->encrypt) { - rctx->cryptlen = req->cryptlen; - /* CBC MAC Operation */ ret = tegra_ccm_compute_auth(ctx, rctx); if (ret) @@ -1179,8 +1241,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) if (ret) goto out; } else { - rctx->cryptlen = req->cryptlen - ctx->authsize; - /* CTR operation */ ret = tegra_ccm_do_ctr(ctx, rctx); if (ret) @@ -1193,13 +1253,17 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) }
out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, +out_free_inbuf: + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->inbuf.buf, rctx->inbuf.addr);
+ if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + +out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0; @@ -1213,23 +1277,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aead_reqctx *rctx = aead_request_ctx(req); int ret;
- /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->inbuf.addr, GFP_KERNEL); - if (!rctx->inbuf.buf) - return -ENOMEM; - - rctx->inbuf.size = SE_AES_BUFLEN; - - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->outbuf.addr, GFP_KERNEL); - if (!rctx->outbuf.buf) { - ret = -ENOMEM; - goto outbuf_err; - } - - rctx->outbuf.size = SE_AES_BUFLEN; - rctx->src_sg = req->src; rctx->dst_sg = req->dst; rctx->assoclen = req->assoclen; @@ -1243,6 +1290,32 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); rctx->iv[3] = (1 << 24);
+ rctx->key_id = ctx->key_id; + + /* Allocate buffers required */ + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, + &rctx->inbuf.addr, GFP_KERNEL); + if (!rctx->inbuf.buf) { + ret = -ENOMEM; + goto out_finalize; + } + + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, + &rctx->outbuf.addr, GFP_KERNEL); + if (!rctx->outbuf.buf) { + ret = -ENOMEM; + goto out_free_inbuf; + } + + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } + /* If there is associated data perform GMAC operation */ if (rctx->assoclen) { ret = tegra_gcm_do_gmac(ctx, rctx); @@ -1266,14 +1339,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) ret = tegra_gcm_do_verify(ctx->se, rctx);
out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, +out_free_inbuf: + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr);
- /* Finalize the request if there are no errors */ + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + +out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0; @@ -1295,6 +1371,7 @@ static int tegra_aead_cra_init(struct crypto_aead *tfm)
ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->keylen = 0;
ret = se_algname_to_algid(algname); if (ret < 0) { @@ -1376,13 +1453,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm, const u8 *key, u32 keylen) { struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + int ret;
if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; }
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + } + + return 0; }
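A theme running through the tegra-se changes above: setkey no longer fails hard when tegra_key_submit() cannot claim a hardware keyslot. The raw key is cached in the transform context instead, and the request path programs a reserved slot on demand, then invalidates it once the job completes. Condensed from the hunks above (error handling trimmed), the per-request flow is roughly:

	rctx->key_id = ctx->key_id;

	if (!ctx->key_id) {
		/* no persistent keyslot: borrow a reserved one for this request */
		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, ctx->keylen,
						    ctx->alg, &rctx->key_id);
		if (ret)
			goto out;
	}

	/* ... build and submit the SE job using rctx->key_id ... */

	if (tegra_key_is_reserved(rctx->key_id))
		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);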
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx, @@ -1456,6 +1540,35 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct se->base + se->hw->regs->result + (i * 4)); }
+static int tegra_cmac_do_init(struct ahash_request *req) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int i; + + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = ctx->key_id; + rctx->task |= SHA_FIRST; + rctx->blk_size = crypto_ahash_blocksize(tfm); + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + return -ENOMEM; + + rctx->residue.size = 0; + + /* Clear any previous result */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + return 0; +} + static int tegra_cmac_do_update(struct ahash_request *req) { struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); @@ -1483,7 +1596,7 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; rctx->total_len += rctx->datbuf.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); - rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id); + rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
/* * Keep one block and residue bytes in residue and @@ -1497,6 +1610,11 @@ static int tegra_cmac_do_update(struct ahash_request *req) return 0; }
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -1511,23 +1629,19 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->residue.size = nresidue;
/* - * If this is not the first 'update' call, paste the previous copied + * If this is not the first task, paste the previous copied * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. */ if (!(rctx->task & SHA_FIRST)) tegra_cmac_paste_result(ctx->se, rctx);
cmdlen = tegra_cmac_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
- ret = tegra_se_host1x_submit(se, cmdlen); - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FINAL)) - tegra_cmac_copy_result(ctx->se, rctx); + tegra_cmac_copy_result(ctx->se, rctx); + + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr);
return ret; } @@ -1543,17 +1657,34 @@ static int tegra_cmac_do_final(struct ahash_request *req)
if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { return crypto_shash_tfm_digest(ctx->fallback_tfm, - rctx->datbuf.buf, 0, req->result); + NULL, 0, req->result); + } + + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); }
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+ /* + * If this is not the first task, paste the previous copied + * intermediate results to the registers so that it gets picked up. + */ + if (!(rctx->task & SHA_FIRST)) + tegra_cmac_paste_result(ctx->se, rctx); + /* Prepare command and submit */ cmdlen = tegra_cmac_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) goto out;
@@ -1565,8 +1696,10 @@ static int tegra_cmac_do_final(struct ahash_request *req) writel(0, se->base + se->hw->regs->result + (i * 4));
out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, rctx->residue.buf, rctx->residue.addr); return ret; @@ -1579,17 +1712,41 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - int ret; + int ret = 0; + + if (rctx->task & SHA_INIT) { + ret = tegra_cmac_do_init(req); + if (ret) + goto out; + + rctx->task &= ~SHA_INIT; + } + + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + }
if (rctx->task & SHA_UPDATE) { ret = tegra_cmac_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; }
if (rctx->task & SHA_FINAL) { ret = tegra_cmac_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } +out: + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
crypto_finalize_hash_request(se->engine, req, ret);
@@ -1631,6 +1788,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->keylen = 0;
ret = se_algname_to_algid(algname); if (ret < 0) { @@ -1655,51 +1813,11 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm) tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); }
-static int tegra_cmac_init(struct ahash_request *req) -{ - struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); - struct tegra_se *se = ctx->se; - int i; - - rctx->total_len = 0; - rctx->datbuf.size = 0; - rctx->residue.size = 0; - rctx->task = SHA_FIRST; - rctx->blk_size = crypto_ahash_blocksize(tfm); - - rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, - &rctx->residue.addr, GFP_KERNEL); - if (!rctx->residue.buf) - goto resbuf_fail; - - rctx->residue.size = 0; - - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - - rctx->datbuf.size = 0; - - /* Clear any previous result */ - for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - writel(0, se->base + se->hw->regs->result + (i * 4)); - - return 0; - -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); -resbuf_fail: - return -ENOMEM; -} - static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + int ret;
if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); @@ -1709,7 +1827,24 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, if (ctx->fallback_tfm) crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + } + + return 0; +} + +static int tegra_cmac_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task = SHA_INIT; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); }
static int tegra_cmac_update(struct ahash_request *req) @@ -1750,13 +1885,9 @@ static int tegra_cmac_digest(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); - int ret;
- ret = tegra_cmac_init(req); - if (ret) - return ret; + rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); }
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 0b5cdd5676b1..42d007b7af45 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -34,6 +34,7 @@ struct tegra_sha_reqctx { struct tegra_se_datbuf datbuf; struct tegra_se_datbuf residue; struct tegra_se_datbuf digest; + struct tegra_se_datbuf intr_res; unsigned int alg; unsigned int config; unsigned int total_len; @@ -211,9 +212,62 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out) return crypto_ahash_export(&rctx->fallback_req, out); }
-static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, +static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, + struct tegra_sha_reqctx *rctx) +{ + __be32 *res_be = (__be32 *)rctx->intr_res.buf; + u32 *res = (u32 *)rctx->intr_res.buf; + int i = 0, j; + + cpuvaddr[i++] = 0; + cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT); + + for (j = 0; j < HASH_RESULT_REG_COUNT; j++) { + int idx = j; + + /* + * The initial, intermediate and final hash value of SHA-384, SHA-512 + * in SHA_HASH_RESULT registers follow the below layout of bytes. + * + * +---------------+------------+ + * | HASH_RESULT_0 | B4...B7 | + * +---------------+------------+ + * | HASH_RESULT_1 | B0...B3 | + * +---------------+------------+ + * | HASH_RESULT_2 | B12...B15 | + * +---------------+------------+ + * | HASH_RESULT_3 | B8...B11 | + * +---------------+------------+ + * | ...... | + * +---------------+------------+ + * | HASH_RESULT_14| B60...B63 | + * +---------------+------------+ + * | HASH_RESULT_15| B56...B59 | + * +---------------+------------+ + * + */ + if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512) + idx = (j % 2) ? j - 1 : j + 1; + + /* For SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 the initial + * intermediate and final hash value when stored in + * SHA_HASH_RESULT registers, the byte order is NOT in + * little-endian. + */ + if (ctx->alg <= SE_ALG_SHA512) + cpuvaddr[i++] = be32_to_cpu(res_be[idx]); + else + cpuvaddr[i++] = res[idx]; + } + + return i; +} + +static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, struct tegra_sha_reqctx *rctx) { + struct tegra_se *se = ctx->se; u64 msg_len, msg_left; int i = 0;
@@ -241,7 +295,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = upper_32_bits(msg_left); cpuvaddr[i++] = 0; cpuvaddr[i++] = 0; - cpuvaddr[i++] = host1x_opcode_setpayload(6); + cpuvaddr[i++] = host1x_opcode_setpayload(2); cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG); cpuvaddr[i++] = rctx->config;
@@ -249,15 +303,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT; rctx->task &= ~SHA_FIRST; } else { - cpuvaddr[i++] = 0; + /* + * If it isn't the first task, program the HASH_RESULT register + * with the intermediate result from the previous task + */ + i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx); }
+ cpuvaddr[i++] = host1x_opcode_setpayload(4); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR); cpuvaddr[i++] = rctx->datbuf.addr; cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | SE_ADDR_HI_SZ(rctx->datbuf.size)); - cpuvaddr[i++] = rctx->digest.addr; - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | - SE_ADDR_HI_SZ(rctx->digest.size)); + + if (rctx->task & SHA_UPDATE) { + cpuvaddr[i++] = rctx->intr_res.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) | + SE_ADDR_HI_SZ(rctx->intr_res.size)); + } else { + cpuvaddr[i++] = rctx->digest.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | + SE_ADDR_HI_SZ(rctx->digest.size)); + } + if (rctx->key_id) { cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG); @@ -266,42 +334,72 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION); - cpuvaddr[i++] = SE_SHA_OP_WRSTALL | - SE_SHA_OP_START | + cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START | SE_SHA_OP_LASTBUF; cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
- dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x", - msg_len, msg_left, rctx->config); + dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x", + msg_len, msg_left, rctx->datbuf.size, rctx->config);
return i; }
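As an aside for readers following the tegra_se_insert_hash_result() hunk above: the word-pair swap it applies for SHA-384/SHA-512 can be seen in isolation with a small user-space sketch (illustration only, not part of the patch; HASH_RESULT_REG_COUNT is taken from the driver, the register contents are made up):

#include <stdint.h>
#include <stdio.h>

#define HASH_RESULT_REG_COUNT 16

int main(void)
{
	uint32_t res[HASH_RESULT_REG_COUNT];
	int j;

	/* Stand-in register contents: word j holds the value j. */
	for (j = 0; j < HASH_RESULT_REG_COUNT; j++)
		res[j] = j;

	for (j = 0; j < HASH_RESULT_REG_COUNT; j++) {
		/* SHA-384/512 layout: swap each word pair, 0<->1, 2<->3, ... */
		int idx = (j % 2) ? j - 1 : j + 1;

		printf("cmd word %2d <- HASH_RESULT_%-2d (0x%08x)\n",
		       j, idx, res[idx]);
	}

	return 0;
}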
-static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) +static int tegra_sha_do_init(struct ahash_request *req) { - int i; + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++) - rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4)); -} + if (ctx->fallback) + return tegra_sha_fallback_init(req);
-static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) -{ - int i; + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = ctx->key_id; + rctx->task |= SHA_FIRST; + rctx->alg = ctx->alg; + rctx->blk_size = crypto_ahash_blocksize(tfm); + rctx->digest.size = crypto_ahash_digestsize(tfm); + + rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, + &rctx->digest.addr, GFP_KERNEL); + if (!rctx->digest.buf) + goto digbuf_fail; + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + goto resbuf_fail; + + rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4; + rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size, + &rctx->intr_res.addr, GFP_KERNEL); + if (!rctx->intr_res.buf) + goto intr_res_fail; + + return 0;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++) - writel(rctx->result[i], - se->base + se->hw->regs->result + (i * 4)); +intr_res_fail: + dma_free_coherent(se->dev, rctx->residue.size, rctx->residue.buf, + rctx->residue.addr); +resbuf_fail: + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); +digbuf_fail: + return -ENOMEM; }
static int tegra_sha_do_update(struct ahash_request *req) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct tegra_se *se = ctx->se; unsigned int nblks, nresidue, size, ret; - u32 *cpuvaddr = ctx->se->cmdbuf->addr; + u32 *cpuvaddr = se->cmdbuf->addr;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size; @@ -317,7 +415,6 @@ static int tegra_sha_do_update(struct ahash_request *req)
rctx->src_sg = req->src; rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; - rctx->total_len += rctx->datbuf.size;
/* * If nbytes are less than a block size, copy it residue and @@ -326,11 +423,16 @@ static int tegra_sha_do_update(struct ahash_request *req) if (nblks < 1) { scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, rctx->src_sg, 0, req->nbytes, 0); - rctx->residue.size += req->nbytes; + return 0; }
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -343,29 +445,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
/* Update residue value with the residue after current block */ rctx->residue.size = nresidue; + rctx->total_len += rctx->datbuf.size;
rctx->config = tegra_sha_get_config(rctx->alg) | - SE_SHA_DST_HASH_REG; - - /* - * If this is not the first 'update' call, paste the previous copied - * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FIRST)) - tegra_sha_paste_hash_result(ctx->se, rctx); + SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx); + size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
- ret = tegra_se_host1x_submit(ctx->se, size); - - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FINAL)) - tegra_sha_copy_hash_result(ctx->se, rctx); + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr);
return ret; } @@ -379,16 +468,25 @@ static int tegra_sha_do_final(struct ahash_request *req) u32 *cpuvaddr = se->cmdbuf->addr; int size, ret = 0;
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + } + rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size;
rctx->config = tegra_sha_get_config(rctx->alg) | SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); - - ret = tegra_se_host1x_submit(se, size); + size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); if (ret) goto out;
@@ -396,12 +494,18 @@ static int tegra_sha_do_final(struct ahash_request *req) memcpy(req->result, rctx->digest.buf, rctx->digest.size);
out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm), rctx->residue.buf, rctx->residue.addr); dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, rctx->digest.addr); + + dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf, + rctx->intr_res.addr); + return ret; }
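The residue handling in tegra_sha_do_update()/tegra_sha_do_final() above splits each request between whole blocks (sent to the engine) and a partial-block tail (carried over as residue). A stand-alone sketch of the same arithmetic, with made-up block size and byte counts for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int blk_size = 64;	/* e.g. SHA-256 block size */
	unsigned int residue = 20;	/* bytes carried over from the last update */
	unsigned int nbytes = 150;	/* bytes in this request */

	/* Same arithmetic as tegra_sha_do_update() */
	unsigned int nresidue = (nbytes + residue) % blk_size;
	unsigned int nblks = (nbytes + residue) / blk_size;
	unsigned int dma_bytes = (nbytes + residue) - nresidue;

	if (nblks < 1)
		printf("less than one block: keep all %u bytes as residue\n",
		       nbytes + residue);
	else
		printf("hash %u bytes (%u blocks) now, carry %u bytes as residue\n",
		       dma_bytes, nblks, nresidue);

	return 0;
}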
@@ -414,16 +518,31 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret = 0;
+ if (rctx->task & SHA_INIT) { + ret = tegra_sha_do_init(req); + if (ret) + goto out; + + rctx->task &= ~SHA_INIT; + } + if (rctx->task & SHA_UPDATE) { ret = tegra_sha_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; }
if (rctx->task & SHA_FINAL) { ret = tegra_sha_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; }
+out: crypto_finalize_hash_request(se->engine, req, ret);
return 0; @@ -497,52 +616,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm) tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); }
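The SHA_INIT/SHA_UPDATE/SHA_FINAL dispatch in tegra_sha_do_one_req() above is a plain bitmask state machine: each stage runs at most once per request and clears its own flag on success. A stand-alone model of that pattern (flag values mirror the header change below, the stage bodies are placeholders):

#include <stdio.h>

#define SHA_INIT   (1U << 1)
#define SHA_UPDATE (1U << 2)
#define SHA_FINAL  (1U << 3)

static int do_init(void)   { printf("init\n");   return 0; }
static int do_update(void) { printf("update\n"); return 0; }
static int do_final(void)  { printf("final\n");  return 0; }

static int do_one_req(unsigned int *task)
{
	int ret;

	if (*task & SHA_INIT) {
		ret = do_init();
		if (ret)
			return ret;
		*task &= ~SHA_INIT;
	}
	if (*task & SHA_UPDATE) {
		ret = do_update();
		if (ret)
			return ret;
		*task &= ~SHA_UPDATE;
	}
	if (*task & SHA_FINAL) {
		ret = do_final();
		if (ret)
			return ret;
		*task &= ~SHA_FINAL;
	}
	return 0;
}

int main(void)
{
	/* A one-shot digest request queues all three stages at once. */
	unsigned int task = SHA_INIT | SHA_UPDATE | SHA_FINAL;

	return do_one_req(&task);
}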
-static int tegra_sha_init(struct ahash_request *req) -{ - struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); - struct tegra_se *se = ctx->se; - - if (ctx->fallback) - return tegra_sha_fallback_init(req); - - rctx->total_len = 0; - rctx->datbuf.size = 0; - rctx->residue.size = 0; - rctx->key_id = ctx->key_id; - rctx->task = SHA_FIRST; - rctx->alg = ctx->alg; - rctx->blk_size = crypto_ahash_blocksize(tfm); - rctx->digest.size = crypto_ahash_digestsize(tfm); - - rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, - &rctx->digest.addr, GFP_KERNEL); - if (!rctx->digest.buf) - goto digbuf_fail; - - rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, - &rctx->residue.addr, GFP_KERNEL); - if (!rctx->residue.buf) - goto resbuf_fail; - - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - - return 0; - -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); -resbuf_fail: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf, - rctx->datbuf.addr); -digbuf_fail: - return -ENOMEM; -} - static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key, unsigned int keylen) { @@ -559,13 +632,29 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + int ret;
if (aes_check_keylen(keylen)) return tegra_hmac_fallback_setkey(ctx, key, keylen);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) + return tegra_hmac_fallback_setkey(ctx, key, keylen); + ctx->fallback = false;
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + return 0; +} + +static int tegra_sha_init(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + rctx->task = SHA_INIT; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); }
static int tegra_sha_update(struct ahash_request *req) @@ -615,16 +704,12 @@ static int tegra_sha_digest(struct ahash_request *req) struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); - int ret;
if (ctx->fallback) return tegra_sha_fallback_digest(req);
- ret = tegra_sha_init(req); - if (ret) - return ret; + rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); }
diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c index ac14678dbd30..956fa9b4e9b1 100644 --- a/drivers/crypto/tegra/tegra-se-key.c +++ b/drivers/crypto/tegra/tegra-se-key.c @@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key, u32 keylen, u16 slot, u32 alg) { const u32 *keyval = (u32 *)key; - u32 *addr = se->cmdbuf->addr, size; + u32 *addr = se->keybuf->addr, size; + int ret; + + mutex_lock(&kslt_lock);
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg); + ret = tegra_se_host1x_submit(se, se->keybuf, size); + + mutex_unlock(&kslt_lock);
- return tegra_se_host1x_submit(se, size); + return ret; }
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) @@ -135,6 +141,23 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) tegra_keyslot_free(keyid); }
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg) +{ + u8 zkey[AES_MAX_KEY_SIZE] = {0}; + + if (!keyid) + return; + + /* Overwrite the key with 0s */ + tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg); +} + +inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + return tegra_key_insert(se, key, keylen, *keyid, alg); +} + int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid) { int ret; @@ -143,7 +166,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3 if (!tegra_key_in_kslt(*keyid)) { *keyid = tegra_keyslot_alloc(); if (!(*keyid)) { - dev_err(se->dev, "failed to allocate key slot\n"); + dev_dbg(se->dev, "failed to allocate key slot\n"); return -ENOMEM; } } diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index 918c0b10614d..1c94f1de0546 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi return cmdbuf; }
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size) +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size) { struct host1x_job *job; int ret; @@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size) job->engine_fallback_streamid = se->stream_id; job->engine_streamid_offset = SE_STREAM_ID;
- se->cmdbuf->words = size; + cmdbuf->words = size;
- host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0); + host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev); if (ret) { @@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client) goto syncpt_put; }
+ se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K); + if (!se->keybuf) { + ret = -ENOMEM; + goto cmdbuf_put; + } + ret = se->hw->init_alg(se); if (ret) { dev_err(se->dev, "failed to register algorithms\n"); - goto cmdbuf_put; + goto keybuf_put; }
return 0;
+keybuf_put: + tegra_se_cmdbuf_put(&se->keybuf->bo); cmdbuf_put: tegra_se_cmdbuf_put(&se->cmdbuf->bo); syncpt_put: diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index b9dd7ceb8783..b6cac9384f66 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -24,6 +24,7 @@ #define SE_STREAM_ID 0x90
#define SE_SHA_CFG 0x4004 +#define SE_SHA_IN_ADDR 0x400c #define SE_SHA_KEY_ADDR 0x4094 #define SE_SHA_KEY_DATA 0x4098 #define SE_SHA_KEYMANIFEST 0x409c @@ -340,12 +341,14 @@ #define SE_CRYPTO_CTR_REG_COUNT 4 #define SE_MAX_KEYSLOT 15 #define SE_MAX_MEM_ALLOC SZ_4M -#define SE_AES_BUFLEN 0x8000 -#define SE_SHA_BUFLEN 0x2000 + +#define TEGRA_AES_RESERVED_KSLT 14 +#define TEGRA_XTS_RESERVED_KSLT 15
#define SHA_FIRST BIT(0) -#define SHA_UPDATE BIT(1) -#define SHA_FINAL BIT(2) +#define SHA_INIT BIT(1) +#define SHA_UPDATE BIT(2) +#define SHA_FINAL BIT(3)
/* Security Engine operation modes */ enum se_aes_alg { @@ -420,6 +423,7 @@ struct tegra_se { struct host1x_client client; struct host1x_channel *channel; struct tegra_se_cmdbuf *cmdbuf; + struct tegra_se_cmdbuf *keybuf; struct crypto_engine *engine; struct host1x_syncpt *syncpt; struct device *dev; @@ -501,8 +505,33 @@ void tegra_deinit_aes(struct tegra_se *se); void tegra_deinit_hash(struct tegra_se *se); int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid); + +int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid); + void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); -int tegra_se_host1x_submit(struct tegra_se *se, u32 size); +void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg); +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size); + +static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + *keyid = TEGRA_AES_RESERVED_KSLT; + return tegra_key_submit_reserved(se, key, keylen, alg, keyid); +} + +static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + *keyid = TEGRA_XTS_RESERVED_KSLT; + return tegra_key_submit_reserved(se, key, keylen, alg, keyid); +} + +static inline bool tegra_key_is_reserved(u32 keyid) +{ + return ((keyid == TEGRA_AES_RESERVED_KSLT) || + (keyid == TEGRA_XTS_RESERVED_KSLT)); +}
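The reserved-keyslot helpers above cover the case where every dynamic hardware key slot is taken: the key stays in the transform context, gets written into a fixed reserved slot only for the duration of a request, and is zeroed again afterwards. A stand-alone model of that allocation policy (slot numbers come from the patch; the pool bookkeeping is simplified for illustration):

#include <stdbool.h>
#include <stdio.h>

#define SE_MAX_KEYSLOT            15
#define TEGRA_AES_RESERVED_KSLT   14
#define TEGRA_XTS_RESERVED_KSLT   15

static bool slot_used[SE_MAX_KEYSLOT + 1];

static unsigned int keyslot_alloc(void)
{
	/* Slots 14 and 15 are reserved and never handed out here. */
	for (unsigned int i = 1; i < TEGRA_AES_RESERVED_KSLT; i++) {
		if (!slot_used[i]) {
			slot_used[i] = true;
			return i;
		}
	}
	return 0;	/* pool exhausted */
}

static bool key_is_reserved(unsigned int keyid)
{
	return keyid == TEGRA_AES_RESERVED_KSLT ||
	       keyid == TEGRA_XTS_RESERVED_KSLT;
}

int main(void)
{
	/* Pretend every dynamic slot is already in use. */
	for (unsigned int i = 1; i < TEGRA_AES_RESERVED_KSLT; i++)
		slot_used[i] = true;

	unsigned int keyid = keyslot_alloc();
	if (!keyid) {
		/* Fall back to the reserved slot for this request only. */
		keyid = TEGRA_AES_RESERVED_KSLT;
		printf("using reserved slot %u\n", keyid);
	}

	if (key_is_reserved(keyid))
		printf("slot %u must be invalidated after the request\n", keyid);

	return 0;
}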
/* HOST1x OPCODES */ static inline u32 host1x_opcode_setpayload(unsigned int payload) diff --git a/drivers/dma/amd/ae4dma/ae4dma-pci.c b/drivers/dma/amd/ae4dma/ae4dma-pci.c index aad0dc4294a3..587c5a10c1a8 100644 --- a/drivers/dma/amd/ae4dma/ae4dma-pci.c +++ b/drivers/dma/amd/ae4dma/ae4dma-pci.c @@ -46,8 +46,8 @@ static int ae4_get_irqs(struct ae4_device *ae4)
} else { ae4_msix->msix_count = ret; - for (i = 0; i < MAX_AE4_HW_QUEUES; i++) - ae4->ae4_irq[i] = ae4_msix->msix_entry[i].vector; + for (i = 0; i < ae4_msix->msix_count; i++) + ae4->ae4_irq[i] = pci_irq_vector(pdev, i); }
return ret; diff --git a/drivers/dma/amd/ae4dma/ae4dma.h b/drivers/dma/amd/ae4dma/ae4dma.h index 265c5d436008..57f6048726bb 100644 --- a/drivers/dma/amd/ae4dma/ae4dma.h +++ b/drivers/dma/amd/ae4dma/ae4dma.h @@ -37,6 +37,8 @@ #define AE4_DMA_VERSION 4 #define CMD_AE4_DESC_DW0_VAL 2
+#define AE4_TIME_OUT 5000 + struct ae4_msix { int msix_count; struct msix_entry msix_entry[MAX_AE4_HW_QUEUES]; diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c index 35c84ec9608b..715ac3ae067b 100644 --- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c +++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c @@ -198,8 +198,10 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan, { struct dma_async_tx_descriptor *tx_desc; struct virt_dma_desc *vd; + struct pt_device *pt; unsigned long flags;
+ pt = chan->pt; /* Loop over descriptors until one is found with commands */ do { if (desc) { @@ -217,7 +219,7 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
spin_lock_irqsave(&chan->vc.lock, flags);
- if (desc) { + if (pt->ver != AE4_DMA_VERSION && desc) { if (desc->status != DMA_COMPLETE) { if (desc->status != DMA_ERROR) desc->status = DMA_COMPLETE; @@ -235,7 +237,7 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
spin_unlock_irqrestore(&chan->vc.lock, flags);
- if (tx_desc) { + if (pt->ver != AE4_DMA_VERSION && tx_desc) { dmaengine_desc_get_callback_invoke(tx_desc, NULL); dma_run_dependencies(tx_desc); vchan_vdesc_fini(vd); @@ -245,11 +247,25 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan, return NULL; }
+static inline bool ae4_core_queue_full(struct pt_cmd_queue *cmd_q) +{ + u32 front_wi = readl(cmd_q->reg_control + AE4_WR_IDX_OFF); + u32 rear_ri = readl(cmd_q->reg_control + AE4_RD_IDX_OFF); + + if (((MAX_CMD_QLEN + front_wi - rear_ri) % MAX_CMD_QLEN) >= (MAX_CMD_QLEN - 1)) + return true; + + return false; +} + static void pt_cmd_callback(void *data, int err) { struct pt_dma_desc *desc = data; + struct ae4_cmd_queue *ae4cmd_q; struct dma_chan *dma_chan; struct pt_dma_chan *chan; + struct ae4_device *ae4; + struct pt_device *pt; int ret;
if (err == -EINPROGRESS) @@ -257,11 +273,32 @@ static void pt_cmd_callback(void *data, int err)
dma_chan = desc->vd.tx.chan; chan = to_pt_chan(dma_chan); + pt = chan->pt;
if (err) desc->status = DMA_ERROR;
while (true) { + if (pt->ver == AE4_DMA_VERSION) { + ae4 = container_of(pt, struct ae4_device, pt); + ae4cmd_q = &ae4->ae4cmd_q[chan->id]; + + if (ae4cmd_q->q_cmd_count >= (CMD_Q_LEN - 1) || + ae4_core_queue_full(&ae4cmd_q->cmd_q)) { + wake_up(&ae4cmd_q->q_w); + + if (wait_for_completion_timeout(&ae4cmd_q->cmp, + msecs_to_jiffies(AE4_TIME_OUT)) + == 0) { + dev_err(pt->dev, "TIMEOUT %d:\n", ae4cmd_q->id); + break; + } + + reinit_completion(&ae4cmd_q->cmp); + continue; + } + } + /* Check for DMA descriptor completion */ desc = pt_handle_active_desc(chan, desc);
@@ -296,6 +333,49 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan, return desc; }
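The ae4_core_queue_full() helper added above is the usual circular-buffer occupancy test: with a write index and a read index that wrap at MAX_CMD_QLEN, the queue is treated as full once only one free entry remains. A stand-alone version of the same arithmetic (the MAX_CMD_QLEN value here is assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

#define MAX_CMD_QLEN 16		/* illustrative value */

static bool queue_full(unsigned int front_wi, unsigned int rear_ri)
{
	/* Occupancy of a circular buffer with wrapping indices. */
	unsigned int used = (MAX_CMD_QLEN + front_wi - rear_ri) % MAX_CMD_QLEN;

	return used >= MAX_CMD_QLEN - 1;
}

int main(void)
{
	printf("wi=5 ri=5 -> %s\n", queue_full(5, 5) ? "full" : "ok");
	printf("wi=4 ri=5 -> %s\n", queue_full(4, 5) ? "full" : "ok");
	printf("wi=4 ri=6 -> %s\n", queue_full(4, 6) ? "full" : "ok");
	return 0;
}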
+static void pt_cmd_callback_work(void *data, int err) +{ + struct dma_async_tx_descriptor *tx_desc; + struct pt_dma_desc *desc = data; + struct dma_chan *dma_chan; + struct virt_dma_desc *vd; + struct pt_dma_chan *chan; + unsigned long flags; + + dma_chan = desc->vd.tx.chan; + chan = to_pt_chan(dma_chan); + + if (err == -EINPROGRESS) + return; + + tx_desc = &desc->vd.tx; + vd = &desc->vd; + + if (err) + desc->status = DMA_ERROR; + + spin_lock_irqsave(&chan->vc.lock, flags); + if (desc) { + if (desc->status != DMA_COMPLETE) { + if (desc->status != DMA_ERROR) + desc->status = DMA_COMPLETE; + + dma_cookie_complete(tx_desc); + dma_descriptor_unmap(tx_desc); + } else { + tx_desc = NULL; + } + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + + if (tx_desc) { + dmaengine_desc_get_callback_invoke(tx_desc, NULL); + dma_run_dependencies(tx_desc); + list_del(&desc->vd.node); + vchan_vdesc_fini(vd); + } +} + static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, @@ -327,6 +407,7 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan, desc->len = len;
if (pt->ver == AE4_DMA_VERSION) { + pt_cmd->pt_cmd_callback = pt_cmd_callback_work; ae4 = container_of(pt, struct ae4_device, pt); ae4cmd_q = &ae4->ae4cmd_q[chan->id]; mutex_lock(&ae4cmd_q->cmd_lock); @@ -367,13 +448,16 @@ static void pt_issue_pending(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); struct pt_dma_desc *desc; + struct pt_device *pt; unsigned long flags; bool engine_is_idle = true;
+ pt = chan->pt; + spin_lock_irqsave(&chan->vc.lock, flags);
desc = pt_next_dma_desc(chan); - if (desc) + if (desc && pt->ver != AE4_DMA_VERSION) engine_is_idle = false;
vchan_issue_pending(&chan->vc); diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index f989b6c9c0a9..e3e0e88a76d3 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -401,6 +401,7 @@ fsl_edma2_irq_init(struct platform_device *pdev,
/* The last IRQ is for eDMA err */ if (i == count - 1) { + fsl_edma->errirq = irq; ret = devm_request_irq(&pdev->dev, irq, fsl_edma_err_handler, 0, "eDMA2-ERR", fsl_edma); @@ -420,10 +421,13 @@ static void fsl_edma_irq_exit( struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) { if (fsl_edma->txirq == fsl_edma->errirq) { - devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + if (fsl_edma->txirq >= 0) + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); } else { - devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); - devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); + if (fsl_edma->txirq >= 0) + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + if (fsl_edma->errirq >= 0) + devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); } }
@@ -620,6 +624,8 @@ static int fsl_edma_probe(struct platform_device *pdev) if (!fsl_edma) return -ENOMEM;
+ fsl_edma->errirq = -EINVAL; + fsl_edma->txirq = -EINVAL; fsl_edma->drvdata = drvdata; fsl_edma->n_chans = chans; mutex_init(&fsl_edma->fsl_edma_mutex); @@ -802,9 +808,9 @@ static void fsl_edma_remove(struct platform_device *pdev) struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
fsl_edma_irq_exit(pdev, fsl_edma); - fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); + fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); }
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index f45d849d3f15..355a977019e9 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -751,6 +751,8 @@ static int i10nm_get_ddr_munits(void) continue; } else { d->imc[lmc].mdev = mdev; + if (res_cfg->type == SPR) + skx_set_mc_mapping(d, i, lmc); lmc++; } } diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 4fc16922dc1a..9b02a6b43ab5 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -94,8 +94,6 @@ (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \ PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
-#define IE31200_DIMMS 4 -#define IE31200_RANKS 8 #define IE31200_RANKS_PER_CHANNEL 4 #define IE31200_DIMMS_PER_CHANNEL 2 #define IE31200_CHANNELS 2 @@ -167,6 +165,7 @@ #define IE31200_MAD_DIMM_0_OFFSET 0x5004 #define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C #define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0) +#define IE31200_MAD_DIMM_SIZE_SKL GENMASK_ULL(5, 0) #define IE31200_MAD_DIMM_A_RANK BIT(17) #define IE31200_MAD_DIMM_A_RANK_SHIFT 17 #define IE31200_MAD_DIMM_A_RANK_SKL BIT(10) @@ -380,7 +379,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev) static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan) { - dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE; + dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE_SKL; dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0; dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >> (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4))); @@ -429,7 +428,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
nr_channels = how_many_channels(pdev); layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; - layers[0].size = IE31200_DIMMS; + layers[0].size = IE31200_RANKS_PER_CHANNEL; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = nr_channels; @@ -622,7 +621,7 @@ static int __init ie31200_init(void)
pci_rc = pci_register_driver(&ie31200_driver); if (pci_rc < 0) - goto fail0; + return pci_rc;
if (!mci_pdev) { ie31200_registered = 0; @@ -633,11 +632,13 @@ static int __init ie31200_init(void) if (mci_pdev) break; } + if (!mci_pdev) { edac_dbg(0, "ie31200 pci_get_device fail\n"); pci_rc = -ENODEV; - goto fail1; + goto fail0; } + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); if (pci_rc < 0) { edac_dbg(0, "ie31200 init fail\n"); @@ -645,12 +646,12 @@ static int __init ie31200_init(void) goto fail1; } } - return 0;
+ return 0; fail1: - pci_unregister_driver(&ie31200_driver); -fail0: pci_dev_put(mci_pdev); +fail0: + pci_unregister_driver(&ie31200_driver);
return pci_rc; } diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index fdf3a84fe698..595908af9e5c 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -785,13 +785,22 @@ static u64 ecclog_read_and_clear(struct igen6_imc *imc) { u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);
- if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) { - /* Clear CE/UE bits by writing 1s */ - writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET); - return ecclog; - } + /* + * Quirk: The ECC_ERROR_LOG register of certain SoCs may contain + * the invalid value ~0. This will result in a flood of invalid + * error reports in polling mode. Skip it. + */ + if (ecclog == ~0) + return 0;
- return 0; + /* Neither a CE nor a UE. Skip it.*/ + if (!(ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE))) + return 0; + + /* Clear CE/UE bits by writing 1s */ + writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET); + + return ecclog; }
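The igen6 change above hardens ecclog_read_and_clear() against two cases: registers that read back as all-ones on certain SoCs, and reads with neither the CE nor the UE bit set. A stand-alone sketch of that filtering (the bit positions are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define ECC_ERROR_LOG_CE (1ULL << 62)	/* illustrative bit positions */
#define ECC_ERROR_LOG_UE (1ULL << 63)

static uint64_t filter_ecclog(uint64_t ecclog)
{
	/* Quirk: some SoCs return ~0 from the register; ignore it. */
	if (ecclog == ~0ULL)
		return 0;

	/* Neither a corrected nor an uncorrected error was logged. */
	if (!(ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)))
		return 0;

	/* The real driver would now write the value back to clear CE/UE. */
	return ecclog;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)filter_ecclog(~0ULL));
	printf("%#llx\n", (unsigned long long)filter_ecclog(0x1234));
	printf("%#llx\n", (unsigned long long)filter_ecclog(ECC_ERROR_LOG_UE | 0x1234));
	return 0;
}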
static void errsts_clear(struct igen6_imc *imc) diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index f7bd930e058f..fa5b442b1844 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -121,6 +121,35 @@ void skx_adxl_put(void) } EXPORT_SYMBOL_GPL(skx_adxl_put);
+static void skx_init_mc_mapping(struct skx_dev *d) +{ + /* + * By default, the BIOS presents all memory controllers within each + * socket to the EDAC driver. The physical indices are the same as + * the logical indices of the memory controllers enumerated by the + * EDAC driver. + */ + for (int i = 0; i < NUM_IMC; i++) + d->mc_mapping[i] = i; +} + +void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc) +{ + edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n", + pmc, lmc); + + d->mc_mapping[pmc] = lmc; +} +EXPORT_SYMBOL_GPL(skx_set_mc_mapping); + +static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc) +{ + edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n", + pmc, d->mc_mapping[pmc]); + + return d->mc_mapping[pmc]; +} + static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src) { struct skx_dev *d; @@ -188,6 +217,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src) return false; }
+ res->imc = skx_get_mc_mapping(d, res->imc); + for (i = 0; i < adxl_component_count; i++) { if (adxl_values[i] == ~0x0ull) continue; @@ -326,6 +357,8 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list) d->bus[0], d->bus[1], d->bus[2], d->bus[3]); list_add_tail(&d->list, &dev_edac_list); prev = pdev; + + skx_init_mc_mapping(d); }
if (list) diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index b0845bdd4516..ca5408803f87 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -93,6 +93,16 @@ struct skx_dev { struct pci_dev *uracu; /* for i10nm CPU */ struct pci_dev *pcu_cr3; /* for HBM memory detection */ u32 mcroute; + /* + * Some server BIOS may hide certain memory controllers, and the + * EDAC driver skips those hidden memory controllers. However, the + * ADXL still decodes memory error address using physical memory + * controller indices. The mapping table is used to convert the + * physical indices (reported by ADXL) to the logical indices + * (used the EDAC driver) of present memory controllers during the + * error handling process. + */ + u8 mc_mapping[NUM_IMC]; struct skx_imc { struct mem_ctl_info *mci; struct pci_dev *mdev; /* for i10nm CPU */ @@ -242,6 +252,7 @@ void skx_adxl_put(void); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log); void skx_set_mem_cfg(bool mem_cfg_2lm); void skx_set_res_cfg(struct res_config *cfg); +void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
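The skx_common change above decouples the physical memory-controller index reported by ADXL from the logical index used by the EDAC driver: the table defaults to an identity mapping and individual entries are overridden when the BIOS hides a controller. A small model of that lookup (the NUM_IMC value and the remapped entry are assumed for illustration):

#include <stdio.h>

#define NUM_IMC 4	/* illustrative value */

static unsigned char mc_mapping[NUM_IMC];

static void init_mc_mapping(void)
{
	/* Default: physical index == logical index. */
	for (int i = 0; i < NUM_IMC; i++)
		mc_mapping[i] = i;
}

int main(void)
{
	init_mc_mapping();

	/* Example: BIOS hid physical controller 1, so physical 2 became logical 1. */
	mc_mapping[2] = 1;

	for (int pmc = 0; pmc < NUM_IMC; pmc++)
		printf("physical mc %d -> logical mc %d\n", pmc, mc_mapping[pmc]);

	return 0;
}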
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index dfda5ffc14db..fa09a82b4492 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -160,11 +160,12 @@ static int __ffa_devices_unregister(struct device *dev, void *data) return 0; }
-static void ffa_devices_unregister(void) +void ffa_devices_unregister(void) { bus_for_each_dev(&ffa_bus_type, NULL, NULL, __ffa_devices_unregister); } +EXPORT_SYMBOL_GPL(ffa_devices_unregister);
bool ffa_device_is_valid(struct ffa_device *ffa_dev) { diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 2c2ec3c35f15..655672a88095 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -145,7 +145,7 @@ static int ffa_version_check(u32 *version) .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION, }, &ver);
- if (ver.a0 == FFA_RET_NOT_SUPPORTED) { + if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) { pr_info("FFA_VERSION returned not supported\n"); return -EOPNOTSUPP; } @@ -899,7 +899,7 @@ static void ffa_notification_info_get(void) }, &ret);
if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) { - if (ret.a2 != FFA_RET_NO_DATA) + if ((s32)ret.a2 != FFA_RET_NO_DATA) pr_err("Notification Info fetch failed: 0x%lx (0x%lx)", ret.a0, ret.a2); return; @@ -935,7 +935,7 @@ static void ffa_notification_info_get(void) }
/* Per vCPU Notification */ - for (idx = 0; idx < ids_count[list]; idx++) { + for (idx = 1; idx < ids_count[list]; idx++) { if (ids_processed >= max_ids - 1) break;
@@ -1384,11 +1384,30 @@ static struct notifier_block ffa_bus_nb = { .notifier_call = ffa_bus_notifier, };
+static int ffa_xa_add_partition_info(int vm_id) +{ + struct ffa_dev_part_info *info; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + rwlock_init(&info->rw_lock); + ret = xa_insert(&drv_info->partition_info, vm_id, info, GFP_KERNEL); + if (ret) { + pr_err("%s: failed to save partition ID 0x%x - ret:%d. Abort.\n", + __func__, vm_id, ret); + kfree(info); + } + + return ret; +} + static int ffa_setup_partitions(void) { int count, idx, ret; struct ffa_device *ffa_dev; - struct ffa_dev_part_info *info; struct ffa_partition_info *pbuf, *tpbuf;
if (drv_info->version == FFA_VERSION_1_0) { @@ -1422,42 +1441,18 @@ static int ffa_setup_partitions(void) !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) ffa_mode_32bit_set(ffa_dev);
- info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) { + if (ffa_xa_add_partition_info(ffa_dev->vm_id)) { ffa_device_unregister(ffa_dev); continue; } - rwlock_init(&info->rw_lock); - ret = xa_insert(&drv_info->partition_info, tpbuf->id, - info, GFP_KERNEL); - if (ret) { - pr_err("%s: failed to save partition ID 0x%x - ret:%d\n", - __func__, tpbuf->id, ret); - ffa_device_unregister(ffa_dev); - kfree(info); - } }
kfree(pbuf);
/* Allocate for the host */ - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) { - /* Already registered devices are freed on bus_exit */ - ffa_partitions_cleanup(); - return -ENOMEM; - } - - rwlock_init(&info->rw_lock); - ret = xa_insert(&drv_info->partition_info, drv_info->vm_id, - info, GFP_KERNEL); - if (ret) { - pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n", - __func__, drv_info->vm_id, ret); - kfree(info); - /* Already registered devices are freed on bus_exit */ + ret = ffa_xa_add_partition_info(drv_info->vm_id); + if (ret) ffa_partitions_cleanup(); - }
return ret; } @@ -1467,6 +1462,9 @@ static void ffa_partitions_cleanup(void) struct ffa_dev_part_info *info; unsigned long idx;
+ /* Clean up/free all registered devices */ + ffa_devices_unregister(); + xa_for_each(&drv_info->partition_info, idx, info) { xa_erase(&drv_info->partition_info, idx); kfree(info); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 60050da54bf2..1c75a4c9c371 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -1997,17 +1997,7 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) else if (db->width == 4) SCMI_PROTO_FC_RING_DB(32); else /* db->width == 8 */ -#ifdef CONFIG_64BIT SCMI_PROTO_FC_RING_DB(64); -#else - { - u64 val = 0; - - if (db->mask) - val = ioread64_hi_lo(db->addr) & db->mask; - iowrite64_hi_lo(db->set | val, db->addr); - } -#endif }
/** diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 42433c19eb30..560724ce21aa 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -1631,6 +1631,7 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
+ ret = 0; out_fw: cs_dsp_buf_free(&buf_list);
@@ -2338,6 +2339,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
+ ret = 0; out_fw: cs_dsp_buf_free(&buf_list);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 018dfccd771b..f5909977eed4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4223,7 +4223,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); - mutex_init(&adev->virt.rlcg_reg_lock); hash_init(adev->mn_hash); mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); @@ -4249,6 +4248,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->se_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); spin_lock_init(&adev->mm_stats.lock); + spin_lock_init(&adev->virt.rlcg_reg_lock); spin_lock_init(&adev->wb.lock);
INIT_LIST_HEAD(&adev->reset_list); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 709c11cbeabd..6fa20980a0b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -145,9 +145,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev) adev->mes.vmid_mask_gfxhub = 0xffffff00;
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) { - /* use only 1st MEC pipes */ - if (i >= adev->gfx.mec.num_pipe_per_mec) - continue; + if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec)) + break; adev->mes.compute_hqd_mask[i] = 0xc; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index dde15c6a96e1..a7f2648245ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -32,462 +32,7 @@ #include "amdgpu_umsch_mm.h" #include "umsch_mm_v4_0.h"
-struct umsch_mm_test_ctx_data { - uint8_t process_csa[PAGE_SIZE]; - uint8_t vpe_ctx_csa[PAGE_SIZE]; - uint8_t vcn_ctx_csa[PAGE_SIZE]; -}; - -struct umsch_mm_test_mqd_data { - uint8_t vpe_mqd[PAGE_SIZE]; - uint8_t vcn_mqd[PAGE_SIZE]; -}; - -struct umsch_mm_test_ring_data { - uint8_t vpe_ring[PAGE_SIZE]; - uint8_t vpe_ib[PAGE_SIZE]; - uint8_t vcn_ring[PAGE_SIZE]; - uint8_t vcn_ib[PAGE_SIZE]; -}; - -struct umsch_mm_test_queue_info { - uint64_t mqd_addr; - uint64_t csa_addr; - uint32_t doorbell_offset_0; - uint32_t doorbell_offset_1; - enum UMSCH_SWIP_ENGINE_TYPE engine; -}; - -struct umsch_mm_test { - struct amdgpu_bo *ctx_data_obj; - uint64_t ctx_data_gpu_addr; - uint32_t *ctx_data_cpu_addr; - - struct amdgpu_bo *mqd_data_obj; - uint64_t mqd_data_gpu_addr; - uint32_t *mqd_data_cpu_addr; - - struct amdgpu_bo *ring_data_obj; - uint64_t ring_data_gpu_addr; - uint32_t *ring_data_cpu_addr; - - - struct amdgpu_vm *vm; - struct amdgpu_bo_va *bo_va; - uint32_t pasid; - uint32_t vm_cntx_cntl; - uint32_t num_queues; -}; - -static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, - uint64_t addr, uint32_t size) -{ - struct amdgpu_sync sync; - struct drm_exec exec; - int r; - - amdgpu_sync_create(&sync); - - drm_exec_init(&exec, 0, 0); - drm_exec_until_all_locked(&exec) { - r = drm_exec_lock_obj(&exec, &bo->tbo.base); - drm_exec_retry_on_contention(&exec); - if (unlikely(r)) - goto error_fini_exec; - - r = amdgpu_vm_lock_pd(vm, &exec, 0); - drm_exec_retry_on_contention(&exec); - if (unlikely(r)) - goto error_fini_exec; - } - - *bo_va = amdgpu_vm_bo_add(adev, vm, bo); - if (!*bo_va) { - r = -ENOMEM; - goto error_fini_exec; - } - - r = amdgpu_vm_bo_map(adev, *bo_va, addr, 0, size, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); - - if (r) - goto error_del_bo_va; - - - r = amdgpu_vm_bo_update(adev, *bo_va, false); - if (r) - goto error_del_bo_va; - - amdgpu_sync_fence(&sync, (*bo_va)->last_pt_update); - - r = amdgpu_vm_update_pdes(adev, vm, false); - if (r) - goto error_del_bo_va; - - amdgpu_sync_fence(&sync, vm->last_update); - - amdgpu_sync_wait(&sync, false); - drm_exec_fini(&exec); - - amdgpu_sync_free(&sync); - - return 0; - -error_del_bo_va: - amdgpu_vm_bo_del(adev, *bo_va); - amdgpu_sync_free(&sync); - -error_fini_exec: - drm_exec_fini(&exec); - amdgpu_sync_free(&sync); - return r; -} - -static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va, - uint64_t addr) -{ - struct drm_exec exec; - long r; - - drm_exec_init(&exec, 0, 0); - drm_exec_until_all_locked(&exec) { - r = drm_exec_lock_obj(&exec, &bo->tbo.base); - drm_exec_retry_on_contention(&exec); - if (unlikely(r)) - goto out_unlock; - - r = amdgpu_vm_lock_pd(vm, &exec, 0); - drm_exec_retry_on_contention(&exec); - if (unlikely(r)) - goto out_unlock; - } - - - r = amdgpu_vm_bo_unmap(adev, bo_va, addr); - if (r) - goto out_unlock; - - amdgpu_vm_bo_del(adev, bo_va); - -out_unlock: - drm_exec_fini(&exec); - - return r; -} - -static void setup_vpe_queue(struct amdgpu_device *adev, - struct umsch_mm_test *test, - struct umsch_mm_test_queue_info *qinfo) -{ - struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr; - uint64_t ring_gpu_addr = test->ring_data_gpu_addr; - - mqd->rb_base_lo = (ring_gpu_addr >> 8); - mqd->rb_base_hi = (ring_gpu_addr >> 40); - mqd->rb_size = PAGE_SIZE / 4; - mqd->wptr_val = 0; - mqd->rptr_val = 0; - mqd->unmapped = 1; - - if 
(adev->vpe.collaborate_mode) - memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO)); - - qinfo->mqd_addr = test->mqd_data_gpu_addr; - qinfo->csa_addr = test->ctx_data_gpu_addr + - offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa); - qinfo->doorbell_offset_0 = 0; - qinfo->doorbell_offset_1 = 0; -} - -static void setup_vcn_queue(struct amdgpu_device *adev, - struct umsch_mm_test *test, - struct umsch_mm_test_queue_info *qinfo) -{ -} - -static int add_test_queue(struct amdgpu_device *adev, - struct umsch_mm_test *test, - struct umsch_mm_test_queue_info *qinfo) -{ - struct umsch_mm_add_queue_input queue_input = {}; - int r; - - queue_input.process_id = test->pasid; - queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(test->vm->root.bo); - - queue_input.process_va_start = 0; - queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT; - - queue_input.process_quantum = 100000; /* 10ms */ - queue_input.process_csa_addr = test->ctx_data_gpu_addr + - offsetof(struct umsch_mm_test_ctx_data, process_csa); - - queue_input.context_quantum = 10000; /* 1ms */ - queue_input.context_csa_addr = qinfo->csa_addr; - - queue_input.inprocess_context_priority = CONTEXT_PRIORITY_LEVEL_NORMAL; - queue_input.context_global_priority_level = CONTEXT_PRIORITY_LEVEL_NORMAL; - queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0; - queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1; - - queue_input.engine_type = qinfo->engine; - queue_input.mqd_addr = qinfo->mqd_addr; - queue_input.vm_context_cntl = test->vm_cntx_cntl; - - amdgpu_umsch_mm_lock(&adev->umsch_mm); - r = adev->umsch_mm.funcs->add_queue(&adev->umsch_mm, &queue_input); - amdgpu_umsch_mm_unlock(&adev->umsch_mm); - if (r) - return r; - - return 0; -} - -static int remove_test_queue(struct amdgpu_device *adev, - struct umsch_mm_test *test, - struct umsch_mm_test_queue_info *qinfo) -{ - struct umsch_mm_remove_queue_input queue_input = {}; - int r; - - queue_input.doorbell_offset_0 = qinfo->doorbell_offset_0; - queue_input.doorbell_offset_1 = qinfo->doorbell_offset_1; - queue_input.context_csa_addr = qinfo->csa_addr; - - amdgpu_umsch_mm_lock(&adev->umsch_mm); - r = adev->umsch_mm.funcs->remove_queue(&adev->umsch_mm, &queue_input); - amdgpu_umsch_mm_unlock(&adev->umsch_mm); - if (r) - return r; - - return 0; -} - -static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *test) -{ - struct MQD_INFO *mqd = (struct MQD_INFO *)test->mqd_data_cpu_addr; - uint32_t *ring = test->ring_data_cpu_addr + - offsetof(struct umsch_mm_test_ring_data, vpe_ring) / 4; - uint32_t *ib = test->ring_data_cpu_addr + - offsetof(struct umsch_mm_test_ring_data, vpe_ib) / 4; - uint64_t ib_gpu_addr = test->ring_data_gpu_addr + - offsetof(struct umsch_mm_test_ring_data, vpe_ib); - uint32_t *fence = ib + 2048 / 4; - uint64_t fence_gpu_addr = ib_gpu_addr + 2048; - const uint32_t test_pattern = 0xdeadbeef; - int i; - - ib[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0); - ib[1] = lower_32_bits(fence_gpu_addr); - ib[2] = upper_32_bits(fence_gpu_addr); - ib[3] = test_pattern; - - ring[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0); - ring[1] = (ib_gpu_addr & 0xffffffe0); - ring[2] = upper_32_bits(ib_gpu_addr); - ring[3] = 4; - ring[4] = 0; - ring[5] = 0; - - mqd->wptr_val = (6 << 2); - if (adev->vpe.collaborate_mode) - (++mqd)->wptr_val = (6 << 2); - - WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val); - - for (i = 0; i < adev->usec_timeout; i++) { - if (*fence == test_pattern) - return 0; - 
udelay(1); - } - - dev_err(adev->dev, "vpe queue submission timeout\n"); - - return -ETIMEDOUT; -} - -static int submit_vcn_queue(struct amdgpu_device *adev, struct umsch_mm_test *test) -{ - return 0; -} - -static int setup_umsch_mm_test(struct amdgpu_device *adev, - struct umsch_mm_test *test) -{ - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; - int r; - - test->vm_cntx_cntl = hub->vm_cntx_cntl; - - test->vm = kzalloc(sizeof(*test->vm), GFP_KERNEL); - if (!test->vm) { - r = -ENOMEM; - return r; - } - - r = amdgpu_vm_init(adev, test->vm, -1); - if (r) - goto error_free_vm; - - r = amdgpu_pasid_alloc(16); - if (r < 0) - goto error_fini_vm; - test->pasid = r; - - r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ctx_data), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, - &test->ctx_data_obj, - &test->ctx_data_gpu_addr, - (void **)&test->ctx_data_cpu_addr); - if (r) - goto error_free_pasid; - - memset(test->ctx_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ctx_data)); - - r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, - &test->mqd_data_obj, - &test->mqd_data_gpu_addr, - (void **)&test->mqd_data_cpu_addr); - if (r) - goto error_free_ctx_data_obj; - - memset(test->mqd_data_cpu_addr, 0, PAGE_SIZE); - - r = amdgpu_bo_create_kernel(adev, sizeof(struct umsch_mm_test_ring_data), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, - &test->ring_data_obj, - NULL, - (void **)&test->ring_data_cpu_addr); - if (r) - goto error_free_mqd_data_obj; - - memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data)); - - test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM; - r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va, - test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data)); - if (r) - goto error_free_ring_data_obj; - - return 0; - -error_free_ring_data_obj: - amdgpu_bo_free_kernel(&test->ring_data_obj, NULL, - (void **)&test->ring_data_cpu_addr); -error_free_mqd_data_obj: - amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr, - (void **)&test->mqd_data_cpu_addr); -error_free_ctx_data_obj: - amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr, - (void **)&test->ctx_data_cpu_addr); -error_free_pasid: - amdgpu_pasid_free(test->pasid); -error_fini_vm: - amdgpu_vm_fini(adev, test->vm); -error_free_vm: - kfree(test->vm); - - return r; -} - -static void cleanup_umsch_mm_test(struct amdgpu_device *adev, - struct umsch_mm_test *test) -{ - unmap_ring_data(adev, test->vm, test->ring_data_obj, - test->bo_va, test->ring_data_gpu_addr); - amdgpu_bo_free_kernel(&test->mqd_data_obj, &test->mqd_data_gpu_addr, - (void **)&test->mqd_data_cpu_addr); - amdgpu_bo_free_kernel(&test->ring_data_obj, NULL, - (void **)&test->ring_data_cpu_addr); - amdgpu_bo_free_kernel(&test->ctx_data_obj, &test->ctx_data_gpu_addr, - (void **)&test->ctx_data_cpu_addr); - amdgpu_pasid_free(test->pasid); - amdgpu_vm_fini(adev, test->vm); - kfree(test->vm); -} - -static int setup_test_queues(struct amdgpu_device *adev, - struct umsch_mm_test *test, - struct umsch_mm_test_queue_info *qinfo) -{ - int i, r; - - for (i = 0; i < test->num_queues; i++) { - if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE) - setup_vpe_queue(adev, test, &qinfo[i]); - else - setup_vcn_queue(adev, test, &qinfo[i]); - - r = add_test_queue(adev, test, &qinfo[i]); - if (r) - return r; - } - - return 0; -} - -static int submit_test_queues(struct amdgpu_device *adev, - struct umsch_mm_test *test, - struct umsch_mm_test_queue_info *qinfo) -{ - int i, r; - - for (i = 0; i < 
test->num_queues; i++) { - if (qinfo[i].engine == UMSCH_SWIP_ENGINE_TYPE_VPE) - r = submit_vpe_queue(adev, test); - else - r = submit_vcn_queue(adev, test); - if (r) - return r; - } - - return 0; -} - -static void cleanup_test_queues(struct amdgpu_device *adev, - struct umsch_mm_test *test, - struct umsch_mm_test_queue_info *qinfo) -{ - int i; - - for (i = 0; i < test->num_queues; i++) - remove_test_queue(adev, test, &qinfo[i]); -} - -static int umsch_mm_test(struct amdgpu_device *adev) -{ - struct umsch_mm_test_queue_info qinfo[] = { - { .engine = UMSCH_SWIP_ENGINE_TYPE_VPE }, - }; - struct umsch_mm_test test = { .num_queues = ARRAY_SIZE(qinfo) }; - int r; - - r = setup_umsch_mm_test(adev, &test); - if (r) - return r; - - r = setup_test_queues(adev, &test, qinfo); - if (r) - goto cleanup; - - r = submit_test_queues(adev, &test, qinfo); - if (r) - goto cleanup; - - cleanup_test_queues(adev, &test, qinfo); - cleanup_umsch_mm_test(adev, &test); - - return 0; - -cleanup: - cleanup_test_queues(adev, &test, qinfo); - cleanup_umsch_mm_test(adev, &test); - return r; -} +MODULE_FIRMWARE("amdgpu/umsch_mm_4_0_0.bin");
int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws) { @@ -584,7 +129,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch) fw_name = "amdgpu/umsch_mm_4_0_0.bin"; break; default: - break; + return -EINVAL; }
r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, AMDGPU_UCODE_REQUIRED, @@ -792,7 +337,7 @@ static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block) if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend) return 0;
- return umsch_mm_test(adev); + return 0; }
static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 0af469ec6fcc..13e5709ea1ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -1017,6 +1017,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f void *scratch_reg2; void *scratch_reg3; void *spare_int; + unsigned long flags;
if (!adev->gfx.rlc.rlcg_reg_access_supported) { dev_err(adev->dev, @@ -1038,7 +1039,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
- mutex_lock(&adev->virt.rlcg_reg_lock); + spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
if (reg_access_ctrl->spare_int) spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int; @@ -1097,7 +1098,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
ret = readl(scratch_reg0);
- mutex_unlock(&adev->virt.rlcg_reg_lock); + spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 5381b8d596e6..0ca73343a768 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -279,7 +279,8 @@ struct amdgpu_virt { /* the ucode id to signal the autoload */ uint32_t autoload_ucode_id;
- struct mutex rlcg_reg_lock; + /* Spinlock to protect access to the RLCG register interface */ + spinlock_t rlcg_reg_lock;
union amd_sriov_ras_caps ras_en_caps; union amd_sriov_ras_caps ras_telemetry_en_caps; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 56c06b72a70a..cfb51baa581a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1559,7 +1559,7 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; - adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_mec = 1; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 4; break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 48ff00427882..c21b168f75a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -1337,7 +1337,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; - adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_mec = 1; adev->gfx.mec.num_pipe_per_mec = 2; adev->gfx.mec.num_queue_per_pipe = 4; break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 0dce4421418c..eda0dc83714a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1269,6 +1269,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.mec_fw_write_wait = false;
if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) && ((adev->gfx.mec_fw_version < 0x000001a5) || (adev->gfx.mec_feature_version < 46) || (adev->gfx.pfp_fw_version < 0x000000b7) || diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index 8b463c977d08..8b0b3739a537 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -575,8 +575,10 @@ static int vcn_v5_0_1_start(struct amdgpu_device *adev) uint32_t tmp; int i, j, k, r, vcn_inst;
- if (adev->pm.dpm_enabled) - amdgpu_dpm_enable_uvd(adev, true); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_vcn(adev, true, i); + }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; @@ -816,8 +818,10 @@ static int vcn_v5_0_1_stop(struct amdgpu_device *adev) WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0); }
- if (adev->pm.dpm_enabled) - amdgpu_dpm_enable_uvd(adev, false); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_vcn(adev, false, i); + }
return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 34c2c42c0f95..ad9cb50a9fa3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -207,21 +207,6 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO;
- if (!pdd->proc_ctx_cpu_ptr) { - r = amdgpu_amdkfd_alloc_gtt_mem(adev, - AMDGPU_MES_PROC_CTX_SIZE, - &pdd->proc_ctx_bo, - &pdd->proc_ctx_gpu_addr, - &pdd->proc_ctx_cpu_ptr, - false); - if (r) { - dev_err(adev->dev, - "failed to allocate process context bo\n"); - return r; - } - memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); - } - memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); queue_input.process_id = qpd->pqm->process->pasid; queue_input.page_table_base_addr = qpd->page_table_base; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index bd36a75309e1..6c02bc36d634 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -363,10 +363,26 @@ int pqm_create_queue(struct process_queue_manager *pqm, if (retval != 0) return retval;
+ /* Register process if this is the first queue */ if (list_empty(&pdd->qpd.queues_list) && list_empty(&pdd->qpd.priv_queue_list)) dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
+ /* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */ + if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) { + retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, + AMDGPU_MES_PROC_CTX_SIZE, + &pdd->proc_ctx_bo, + &pdd->proc_ctx_gpu_addr, + &pdd->proc_ctx_cpu_ptr, + false); + if (retval) { + dev_err(dev->adev->dev, "failed to allocate process context bo\n"); + return retval; + } + memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); + } + pqn = kzalloc(sizeof(*pqn), GFP_KERNEL); if (!pqn) { retval = -ENOMEM; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 6e2fce329d73..d37ecfdde4f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -63,6 +63,10 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link) { + /* ASIC doesn't support DMUB */ + if (!link->ctx->dmub_srv) + return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index cee1b351e105..f1fe49401bc0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -281,10 +281,10 @@ static void CalculateDynamicMetadataParameters( double DISPCLK, double DCFClkDeepSleep, double PixelClock, - long HTotal, - long VBlank, - long DynamicMetadataTransmittedBytes, - long DynamicMetadataLinesBeforeActiveRequired, + unsigned int HTotal, + unsigned int VBlank, + unsigned int DynamicMetadataTransmittedBytes, + int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double *Tsetup, @@ -3265,8 +3265,8 @@ static double CalculateWriteBackDelay(
static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK, - double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes, - long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, + double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes, + int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks) { double TotalRepeaterDelayTime = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c index d68b4567e218..7216d25c783e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c @@ -141,9 +141,8 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out) core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn4_ip_caps_base.subvp_pstate_allow_width_us; core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn4_ip_caps_base.subvp_swath_height_margin_lines; } else { - memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params)); + memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params)); patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps); - core->clean_me_up.mode_lib.ip.imall_supported = false; }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 9f55207ea9bc..d834d134ad2b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -459,8 +459,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, } if (read_arg) { smu_cmn_read_arg(smu, read_arg); - dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\ - readval: 0x%08x\n", + dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n", smu_get_message_name(smu, msg), index, param, reg, *read_arg); } else { dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n", diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index d081850e3c03..d4e4f484cbe5 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -2463,9 +2463,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev) if (!mhdp) return -ENOMEM;
- clk = devm_clk_get(dev, NULL); + clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(clk)) { - dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk)); + dev_err(dev, "couldn't get and enable clk: %ld\n", PTR_ERR(clk)); return PTR_ERR(clk); }
@@ -2504,14 +2504,12 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->info = of_device_get_match_data(dev);
- clk_prepare_enable(clk); - pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "pm_runtime_resume_and_get failed\n"); pm_runtime_disable(dev); - goto clk_disable; + return ret; }
if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) { @@ -2590,8 +2588,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev) runtime_put: pm_runtime_put_sync(dev); pm_runtime_disable(dev); -clk_disable: - clk_disable_unprepare(mhdp->clk);
return ret; } @@ -2632,8 +2628,6 @@ static void cdns_mhdp_remove(struct platform_device *pdev) cancel_work_sync(&mhdp->modeset_retry_work); flush_work(&mhdp->hpd_work); /* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */ - - clk_disable_unprepare(mhdp->clk); }
static const struct of_device_id mhdp_ids[] = { diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 88ef76a37fe6..76dabca04d0d 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -2250,12 +2250,13 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) continue; }
- for (i = 0; i < 5; i++) { + for (i = 0; i < 5; i++) if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] || - av[i][1] != av[i][2] || bv[i][0] != av[i][3]) + bv[i][1] != av[i][2] || bv[i][0] != av[i][3]) break;
- DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i); + if (i == 5) { + DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d", retry); return true; } } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index e4d9006b59f1..b3d617505dda 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -480,6 +480,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, const char *name) { struct device *dev = pdata->dev; + const struct i2c_client *client = to_i2c_client(dev); struct auxiliary_device *aux; int ret;
@@ -488,6 +489,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, return -ENOMEM;
aux->name = name; + aux->id = (client->adapter->nr << 10) | client->addr; aux->dev.parent = dev; aux->dev.release = ti_sn65dsi86_aux_device_release; device_set_of_node_from_dev(&aux->dev, dev); diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 6d09bef671da..314b394cb7e1 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -175,13 +175,13 @@ static int drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len) { int i; - u8 unpacked_rad[16]; + u8 unpacked_rad[16] = {};
- for (i = 0; i < lct; i++) { + for (i = 1; i < lct; i++) { if (i % 2) - unpacked_rad[i] = rad[i / 2] >> 4; + unpacked_rad[i] = rad[(i - 1) / 2] >> 4; else - unpacked_rad[i] = rad[i / 2] & BIT_MASK(4); + unpacked_rad[i] = rad[(i - 1) / 2] & 0xF; }
/* TODO: Eventually add something to printk so we can format the rad diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 2289e71e2fa2..c299cd94d3f7 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -830,8 +830,11 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) } EXPORT_SYMBOL(drm_send_event);
-static void print_size(struct drm_printer *p, const char *stat, - const char *region, u64 sz) +void drm_fdinfo_print_size(struct drm_printer *p, + const char *prefix, + const char *stat, + const char *region, + u64 sz) { const char *units[] = {"", " KiB", " MiB"}; unsigned u; @@ -842,8 +845,10 @@ static void print_size(struct drm_printer *p, const char *stat, sz = div_u64(sz, SZ_1K); }
- drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]); + drm_printf(p, "%s-%s-%s:\t%llu%s\n", + prefix, stat, region, sz, units[u]); } +EXPORT_SYMBOL(drm_fdinfo_print_size);
int drm_memory_stats_is_zero(const struct drm_memory_stats *stats) { @@ -868,17 +873,22 @@ void drm_print_memory_stats(struct drm_printer *p, enum drm_gem_object_status supported_status, const char *region) { - print_size(p, "total", region, stats->private + stats->shared); - print_size(p, "shared", region, stats->shared); + const char *prefix = "drm"; + + drm_fdinfo_print_size(p, prefix, "total", region, + stats->private + stats->shared); + drm_fdinfo_print_size(p, prefix, "shared", region, stats->shared);
if (supported_status & DRM_GEM_OBJECT_ACTIVE) - print_size(p, "active", region, stats->active); + drm_fdinfo_print_size(p, prefix, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT) - print_size(p, "resident", region, stats->resident); + drm_fdinfo_print_size(p, prefix, "resident", region, + stats->resident);
if (supported_status & DRM_GEM_OBJECT_PURGEABLE) - print_size(p, "purgeable", region, stats->purgeable); + drm_fdinfo_print_size(p, prefix, "purgeable", region, + stats->purgeable); } EXPORT_SYMBOL(drm_print_memory_stats);
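
[Editor's note] For reference, the drm_fdinfo_print_size() helper exported above lets a driver emit fdinfo memory keys under its own prefix instead of the generic "drm" one, which is what the panthor fdinfo changes further down in this patch rely on. A minimal sketch of a driver-side caller, assuming the declaration is exported alongside drm_print_memory_stats() (the header hunk is not shown in this excerpt); the foo_show_fdinfo() name and the byte count are purely illustrative:

#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <linux/sizes.h>

/* Hypothetical fdinfo callback for a driver named "foo". */
static void foo_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	const char *drv_name = file->minor->dev->driver->name;

	/* Emits e.g. "foo-resident-memory:\t4 MiB". */
	drm_fdinfo_print_size(p, drv_name, "resident", "memory", 4 * SZ_1M);
}
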
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 5674f5707cca..8f6fba4217ec 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -620,13 +620,16 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); + goto update_config_out; } -#else +#endif spin_lock_irqsave(&mtk_crtc->config_lock, flags); mtk_crtc->config_updating = false; spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); -#endif
+#if IS_REACHABLE(CONFIG_MTK_CMDQ) +update_config_out: +#endif mutex_unlock(&mtk_crtc->hw_lock); }
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index cd385ba4c66a..d2cf09124d10 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -1766,7 +1766,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val); if (ret < 1) { - drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n"); + dev_err(mtk_dp->dev, "Read mstm cap failed: %zd\n", ret); return ret == 0 ? -EIO : ret; }
@@ -1776,7 +1776,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp) DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &val); if (ret < 1) { - drm_err(mtk_dp->drm_dev, "Read irq vector failed\n"); + dev_err(mtk_dp->dev, "Read irq vector failed: %zd\n", ret); return ret == 0 ? -EIO : ret; }
@@ -2059,7 +2059,7 @@ static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wa
ret = mtk_dp_parse_capabilities(mtk_dp); if (ret) { - drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n"); + dev_err(mtk_dp->dev, "Can't parse capabilities: %d\n", ret); return ret; }
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 40752f232054..852aeef9f38d 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -1116,12 +1116,12 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct mtk_dsi *dsi = host_to_dsi(host); - u32 recv_cnt, i; + ssize_t recv_cnt; u8 read_data[16]; void *src_addr; u8 irq_flag = CMD_DONE_INT_FLAG; u32 dsi_mode; - int ret; + int ret, i;
dsi_mode = readl(dsi->regs + DSI_MODE_CTRL); if (dsi_mode & MODE) { @@ -1170,7 +1170,7 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, if (recv_cnt) memcpy(msg->rx_buf, src_addr, recv_cnt);
- DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n", + DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n", recv_cnt, *((u8 *)(msg->tx_buf)));
restore_dsi_mode: diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index ca82bc829cb9..250ad0d4027d 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -137,7 +137,7 @@ enum hdmi_aud_channel_swap_type {
struct hdmi_audio_param { enum hdmi_audio_coding_type aud_codec; - enum hdmi_audio_sample_size aud_sampe_size; + enum hdmi_audio_sample_size aud_sample_size; enum hdmi_aud_input_type aud_input_type; enum hdmi_aud_i2s_fmt aud_i2s_fmt; enum hdmi_aud_mclk aud_mclk; @@ -173,6 +173,7 @@ struct mtk_hdmi { unsigned int sys_offset; void __iomem *regs; enum hdmi_colorspace csp; + struct platform_device *audio_pdev; struct hdmi_audio_param aud_param; bool audio_enable; bool powered; @@ -1074,7 +1075,7 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
hdmi->csp = HDMI_COLORSPACE_RGB; aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; aud_param->aud_input_type = HDMI_AUD_INPUT_I2S; aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; aud_param->aud_mclk = HDMI_AUD_MCLK_128FS; @@ -1572,14 +1573,14 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data, switch (daifmt->fmt) { case HDMI_I2S: hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S; hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS; break; case HDMI_SPDIF: hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF; break; default: @@ -1662,6 +1663,11 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb, };
+static void mtk_hdmi_unregister_audio_driver(void *data) +{ + platform_device_unregister(data); +} + static int mtk_hdmi_register_audio_driver(struct device *dev) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1672,13 +1678,20 @@ static int mtk_hdmi_register_audio_driver(struct device *dev) .data = hdmi, .no_capture_mute = 1, }; - struct platform_device *pdev; + int ret;
- pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, - PLATFORM_DEVID_AUTO, &codec_data, - sizeof(codec_data)); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + hdmi->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver, + hdmi->audio_pdev); + if (ret) + return ret;
DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME); return 0; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 0fcae53c0b14..159665cb6b14 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -1507,6 +1507,8 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
/* Restore the size in the hardware */ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size); + + a6xx_state->nr_indexed_regs = count; }
static void a7xx_get_indexed_registers(struct msm_gpu *gpu, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index e5dcd41a361f..29485e76f531 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1262,10 +1262,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
- /* force a full mode set if active state changed */ - if (crtc_state->active_changed) - crtc_state->mode_changed = true; - if (cstate->num_mixers) { rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); if (rc) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 48e6e8d74c85..7b56da24711e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -622,9 +622,9 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) if (dpu_enc->phys_encs[i]) intf_count++;
- /* See dpu_encoder_get_topology, we only support 2:2:1 topology */ - if (dpu_enc->dsc) - num_dsc = 2; + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) + if (dpu_enc->hw_dsc[i]) + num_dsc++;
return (num_dsc > 0) && (num_dsc > intf_count); } @@ -649,11 +649,14 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
static struct msm_display_topology dpu_encoder_get_topology( struct dpu_encoder_virt *dpu_enc, - struct dpu_kms *dpu_kms, struct drm_display_mode *mode, struct drm_crtc_state *crtc_state, - struct drm_dsc_config *dsc) + struct drm_connector_state *conn_state) { + struct msm_drm_private *priv = dpu_enc->base.dev->dev_private; + struct msm_display_info *disp_info = &dpu_enc->disp_info; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct drm_dsc_config *dsc = dpu_encoder_get_dsc_config(&dpu_enc->base); struct msm_display_topology topology = {0}; int i, intf_count = 0;
@@ -686,14 +689,38 @@ static struct msm_display_topology dpu_encoder_get_topology(
if (dsc) { /* - * In case of Display Stream Compression (DSC), we would use - * 2 DSC encoders, 2 layer mixers and 1 interface - * this is power optimal and can drive up to (including) 4k - * screens + * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces + * when Display Stream Compression (DSC) is enabled, + * and when enough DSC blocks are available. + * This is power-optimal and can drive up to (including) 4k + * screens. */ - topology.num_dsc = 2; - topology.num_lm = 2; - topology.num_intf = 1; + WARN(topology.num_intf > 2, + "DSC topology cannot support more than 2 interfaces\n"); + if (intf_count >= 2 || dpu_kms->catalog->dsc_count >= 2) { + topology.num_dsc = 2; + topology.num_lm = 2; + } else { + topology.num_dsc = 1; + topology.num_lm = 1; + } + } + + /* + * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it. + * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check() + * earlier. + */ + if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) { + struct drm_framebuffer *fb; + + fb = conn_state->writeback_job->fb; + + if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) + topology.needs_cdm = true; + } else if (disp_info->intf_type == INTF_DP) { + if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], mode)) + topology.needs_cdm = true; }
return topology; @@ -733,6 +760,34 @@ static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms, cstate->num_mixers = num_lm; }
+/** + * dpu_encoder_virt_check_mode_changed: check if full modeset is required + * @drm_enc: Pointer to drm encoder structure + * @crtc_state: Corresponding CRTC state to be checked + * @conn_state: Corresponding Connector's state to be checked + * + * Check if the changes in the object properties demand full mode set. + */ +int dpu_encoder_virt_check_mode_changed(struct drm_encoder *drm_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + struct msm_display_topology topology; + + DPU_DEBUG_ENC(dpu_enc, "\n"); + + /* Using mode instead of adjusted_mode as it wasn't computed yet */ + topology = dpu_encoder_get_topology(dpu_enc, &crtc_state->mode, crtc_state, conn_state); + + if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm) + crtc_state->mode_changed = true; + else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm) + crtc_state->mode_changed = true; + + return 0; +} + static int dpu_encoder_virt_atomic_check( struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, @@ -743,10 +798,7 @@ static int dpu_encoder_virt_atomic_check( struct dpu_kms *dpu_kms; struct drm_display_mode *adj_mode; struct msm_display_topology topology; - struct msm_display_info *disp_info; struct dpu_global_state *global_state; - struct drm_framebuffer *fb; - struct drm_dsc_config *dsc; int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) { @@ -759,7 +811,6 @@ static int dpu_encoder_virt_atomic_check( DPU_DEBUG_ENC(dpu_enc, "\n");
priv = drm_enc->dev->dev_private; - disp_info = &dpu_enc->disp_info; dpu_kms = to_dpu_kms(priv->kms); adj_mode = &crtc_state->adjusted_mode; global_state = dpu_kms_get_global_state(crtc_state->state); @@ -768,37 +819,15 @@ static int dpu_encoder_virt_atomic_check(
trace_dpu_enc_atomic_check(DRMID(drm_enc));
- dsc = dpu_encoder_get_dsc_config(drm_enc); - - topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc); - - /* - * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it. - * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check() - * earlier. - */ - if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) { - fb = conn_state->writeback_job->fb; - - if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) - topology.needs_cdm = true; - } else if (disp_info->intf_type == INTF_DP) { - if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode)) - topology.needs_cdm = true; - } + topology = dpu_encoder_get_topology(dpu_enc, adj_mode, crtc_state, conn_state);
- if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm) - crtc_state->mode_changed = true; - else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm) - crtc_state->mode_changed = true; /* * Release and Allocate resources on every modeset - * Dont allocate when active is false. */ if (drm_atomic_crtc_needs_modeset(crtc_state)) { dpu_rm_release(global_state, drm_enc);
- if (!crtc_state->active_changed || crtc_state->enable) + if (crtc_state->enable) ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc, crtc_state, &topology); if (!ret) @@ -2020,7 +2049,6 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl, static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, struct drm_dsc_config *dsc) { - /* coding only for 2LM, 2enc, 1 dsc config */ struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; struct dpu_hw_ctl *ctl = enc_master->hw_ctl; struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; @@ -2030,22 +2058,24 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, int dsc_common_mode; int pic_width; u32 initial_lines; + int num_dsc = 0; int i;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { hw_pp[i] = dpu_enc->hw_pp[i]; hw_dsc[i] = dpu_enc->hw_dsc[i];
- if (!hw_pp[i] || !hw_dsc[i]) { - DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n"); - return; - } + if (!hw_pp[i] || !hw_dsc[i]) + break; + + num_dsc++; }
- dsc_common_mode = 0; pic_width = dsc->pic_width;
- dsc_common_mode = DSC_MODE_SPLIT_PANEL; + dsc_common_mode = 0; + if (num_dsc > 1) + dsc_common_mode |= DSC_MODE_SPLIT_PANEL; if (dpu_encoder_use_dsc_merge(enc_master->parent)) dsc_common_mode |= DSC_MODE_MULTIPLEX; if (enc_master->intf_mode == INTF_MODE_VIDEO) @@ -2054,14 +2084,10 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, this_frame_slices = pic_width / dsc->slice_width; intf_ip_w = this_frame_slices * dsc->slice_width;
- /* - * dsc merge case: when using 2 encoders for the same stream, - * no. of slices need to be same on both the encoders. - */ - enc_ip_w = intf_ip_w / 2; + enc_ip_w = intf_ip_w / num_dsc; initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) + for (i = 0; i < num_dsc; i++) dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index 92b5ee390788..da133ee4701a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -88,4 +88,8 @@ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+int dpu_encoder_virt_check_mode_changed(struct drm_encoder *drm_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + #endif /* __DPU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 97e9cb8c2b09..8741dc6fc8dd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -446,6 +446,29 @@ static void dpu_kms_disable_commit(struct msm_kms *kms) pm_runtime_put_sync(&dpu_kms->pdev->dev); }
+static int dpu_kms_check_mode_changed(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct drm_crtc_state *new_crtc_state; + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; + int i; + + for_each_new_connector_in_state(state, connector, new_conn_state, i) { + struct drm_encoder *encoder; + + if (!new_conn_state->crtc || !new_conn_state->best_encoder) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + + encoder = new_conn_state->best_encoder; + + dpu_encoder_virt_check_mode_changed(encoder, new_crtc_state, new_conn_state); + } + + return 0; +} + static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -1062,6 +1085,7 @@ static const struct msm_kms_funcs kms_funcs = { .irq = dpu_core_irq, .enable_commit = dpu_kms_enable_commit, .disable_commit = dpu_kms_disable_commit, + .check_mode_changed = dpu_kms_check_mode_changed, .flush_commit = dpu_kms_flush_commit, .wait_flush = dpu_kms_wait_flush, .complete_commit = dpu_kms_complete_commit, diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 007311c21fda..42e100a8adca 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -846,7 +846,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0)); }
-static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay) +static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode) { struct drm_dsc_config *dsc = msm_host->dsc; u32 reg, reg_ctrl, reg_ctrl2; @@ -858,7 +858,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod /* first calculate dsc parameters and then program * compress mode registers */ - slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay); + slice_per_intf = dsc->slice_count;
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf; bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */ @@ -991,7 +991,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) { if (msm_host->dsc) - dsi_update_dsc_timing(msm_host, false, mode->hdisplay); + dsi_update_dsc_timing(msm_host, false);
dsi_write(msm_host, REG_DSI_ACTIVE_H, DSI_ACTIVE_H_START(ha_start) | @@ -1012,7 +1012,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) DSI_ACTIVE_VSYNC_VPOS_END(vs_end)); } else { /* command mode */ if (msm_host->dsc) - dsi_update_dsc_timing(msm_host, true, mode->hdisplay); + dsi_update_dsc_timing(msm_host, true);
/* image data and 1 byte write_memory_start cmd */ if (!msm_host->dsc) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index a210b7c9e5ca..4fabb01345aa 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id) int ret;
if (!IS_BONDED_DSI()) { + /* + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + ret = msm_dsi_host_register(msm_dsi->host); if (ret) return ret; - - msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); } else if (other_dsi) { struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? msm_dsi : other_dsi; struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? other_dsi : msm_dsi; + + /* + * PLL0 is to drive both DSI link clocks in bonded DSI mode. + * + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(clk_master_dsi->phy, + MSM_DSI_PHY_MASTER); + msm_dsi_phy_set_usecase(clk_slave_dsi->phy, + MSM_DSI_PHY_SLAVE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); + /* Register slave host first, so that slave DSI device * has a chance to probe, and do not block the master * DSI device's probe. @@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id) ret = msm_dsi_host_register(master_link_dsi->host); if (ret) return ret; - - /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */ - msm_dsi_phy_set_usecase(clk_master_dsi->phy, - MSM_DSI_PHY_MASTER); - msm_dsi_phy_set_usecase(clk_slave_dsi->phy, - MSM_DSI_PHY_SLAVE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); - msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); }
return 0; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 798168180c1a..a2c87c84aa05 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -305,7 +305,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi writel(pll->phy->cphy_mode ? 0x00 : 0x10, base + REG_DSI_7nm_PHY_PLL_CMODE_1); writel(config->pll_clock_inverters, - base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS); + base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1); }
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index a7a2384044ff..364df245e3a2 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -183,10 +183,16 @@ static unsigned get_crtc_mask(struct drm_atomic_state *state)
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_crtc *crtc; - int i; + int i, ret = 0;
+ /* + * FIXME: stop setting allow_modeset and move this check to the DPU + * driver. + */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if ((old_crtc_state->ctm && !new_crtc_state->ctm) || @@ -196,6 +202,11 @@ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) } }
+ if (kms && kms->funcs && kms->funcs->check_mode_changed) + ret = kms->funcs->check_mode_changed(kms, state); + if (ret) + return ret; + return drm_atomic_helper_check(dev, state); }
diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h index b9049fe1e279..63f95523b2cb 100644 --- a/drivers/gpu/drm/msm/msm_dsc_helper.h +++ b/drivers/gpu/drm/msm/msm_dsc_helper.h @@ -12,17 +12,6 @@ #include <linux/math.h> #include <drm/display/drm_dsc_helper.h>
-/** - * msm_dsc_get_slices_per_intf() - calculate number of slices per interface - * @dsc: Pointer to drm dsc config struct - * @intf_width: interface width in pixels - * Returns: Integer representing the number of slices for the given interface - */ -static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width) -{ - return DIV_ROUND_UP(intf_width, dsc->slice_width); -} - /** * msm_dsc_get_bytes_per_line() - calculate bytes per line * @dsc: Pointer to drm dsc config struct diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index dee470403036..3e9aa2cc38ef 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -509,7 +509,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit, }
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) { - ret = -SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags); + ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags); break; }
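
[Editor's note] The one-character change above removes a double negation: the fix implies SUBMIT_ERROR() already yields a negative errno, so the extra minus sign flipped it into a positive value that "ret < 0" style checks would silently ignore. A self-contained illustration of that failure mode, using a stand-in macro rather than the driver's real SUBMIT_ERROR() definition:

#include <errno.h>
#include <stdio.h>

/* Stand-in: log a message and yield a negative errno, like the real helper. */
#define SUBMIT_ERROR(err, fmt, ...) \
	(fprintf(stderr, fmt "\n", ##__VA_ARGS__), -(err))

int main(void)
{
	int fixed  = SUBMIT_ERROR(EINVAL, "invalid syncobj flags: %x", 0xff);  /* -EINVAL (-22 on Linux) */
	int broken = -SUBMIT_ERROR(EINVAL, "invalid syncobj flags: %x", 0xff); /* +22, not an error code */

	/* Only the first value survives a "ret < 0" error check. */
	printf("fixed=%d broken=%d\n", fixed, broken);
	return 0;
}
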
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index e60162744c66..ec2a75af89b0 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -59,6 +59,13 @@ struct msm_kms_funcs { void (*enable_commit)(struct msm_kms *kms); void (*disable_commit)(struct msm_kms *kms);
+ /** + * @check_mode_changed: + * + * Verify if the commit requires a full modeset on one of CRTCs. + */ + int (*check_mode_changed)(struct msm_kms *kms, struct drm_atomic_state *state); + /** * Prepare for atomic commit. This is called after any previous * (async or otherwise) commit has completed. diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c index 266a087fe14c..3c24a63b6be8 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c @@ -607,7 +607,7 @@ static int ili9882t_add(struct ili9882t *ili)
ili->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(ili->enable_gpio)) { - dev_err(dev, "cannot get reset-gpios %ld\n", + dev_err(dev, "cannot get enable-gpios %ld\n", PTR_ERR(ili->enable_gpio)); return PTR_ERR(ili->enable_gpio); } diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index 0a37cfeeb181..a9da1d1eeb70 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -128,14 +128,11 @@ static void panthor_device_reset_work(struct work_struct *work) struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work); int ret = 0, cookie;
- if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) { - /* - * No need for a reset as the device has been (or will be) - * powered down - */ - atomic_set(&ptdev->reset.pending, 0); + /* If the device is entering suspend, we don't reset. A slow reset will + * be forced at resume time instead. + */ + if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) return; - }
if (!drm_dev_enter(&ptdev->base, &cookie)) return; @@ -477,6 +474,14 @@ int panthor_device_resume(struct device *dev)
if (panthor_device_is_initialized(ptdev) && drm_dev_enter(&ptdev->base, &cookie)) { + /* If there was a reset pending at the time we suspended the + * device, we force a slow reset. + */ + if (atomic_read(&ptdev->reset.pending)) { + ptdev->reset.fast = false; + atomic_set(&ptdev->reset.pending, 0); + } + ret = panthor_device_resume_hw_components(ptdev); if (ret && ptdev->reset.fast) { drm_err(&ptdev->base, "Fast reset failed, trying a slow reset"); @@ -493,9 +498,6 @@ int panthor_device_resume(struct device *dev) goto err_suspend_devfreq; }
- if (atomic_read(&ptdev->reset.pending)) - queue_work(ptdev->reset.wq, &ptdev->reset.work); - /* Clear all IOMEM mappings pointing to this device after we've * resumed. This way the fake mappings pointing to the dummy pages * are removed and the real iomem mapping will be restored on next diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 08136e790ca0..06fe46e32073 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1458,12 +1458,26 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev, drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency); }
+static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file) +{ + char *drv_name = file->minor->dev->driver->name; + struct panthor_file *pfile = file->driver_priv; + struct drm_memory_stats stats = {0}; + + panthor_fdinfo_gather_group_mem_info(pfile, &stats); + panthor_vm_heaps_sizes(pfile, &stats); + + drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident); + drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active); +} + static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file) { struct drm_device *dev = file->minor->dev; struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p); + panthor_show_internal_memory_stats(p, file);
drm_show_memory_stats(p, file); } diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 68eb4fb4d3a8..a024b475b688 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -637,8 +637,8 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev, u32 ehdr) { struct panthor_fw_build_info_hdr hdr; - char header[9]; - const char git_sha_header[sizeof(header)] = "git_sha: "; + static const char git_sha_header[] = "git_sha: "; + const int header_len = sizeof(git_sha_header) - 1; int ret;
ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr)); @@ -652,8 +652,7 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev, return 0; }
- if (memcmp(git_sha_header, fw->data + hdr.meta_start, - sizeof(git_sha_header))) { + if (memcmp(git_sha_header, fw->data + hdr.meta_start, header_len)) { /* Not the expected header, this isn't metadata we understand */ return 0; } @@ -666,7 +665,7 @@ static int panthor_fw_read_build_info(struct panthor_device *ptdev, }
drm_info(&ptdev->base, "Firmware git sha: %s\n", - fw->data + hdr.meta_start + sizeof(git_sha_header)); + fw->data + hdr.meta_start + header_len);
return 0; } diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h index 22448abde992..6598d96c6d2a 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.h +++ b/drivers/gpu/drm/panthor/panthor_fw.h @@ -102,9 +102,9 @@ struct panthor_fw_cs_output_iface { #define CS_STATUS_BLOCKED_REASON_SB_WAIT 1 #define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT 2 #define CS_STATUS_BLOCKED_REASON_SYNC_WAIT 3 -#define CS_STATUS_BLOCKED_REASON_DEFERRED 5 -#define CS_STATUS_BLOCKED_REASON_RES 6 -#define CS_STATUS_BLOCKED_REASON_FLUSH 7 +#define CS_STATUS_BLOCKED_REASON_DEFERRED 4 +#define CS_STATUS_BLOCKED_REASON_RESOURCE 5 +#define CS_STATUS_BLOCKED_REASON_FLUSH 6 #define CS_STATUS_BLOCKED_REASON_MASK GENMASK(3, 0) u32 status_blocked_reason; u32 status_wait_sync_value_hi; diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c index 3796a9eb22af..3bdf61c14264 100644 --- a/drivers/gpu/drm/panthor/panthor_heap.c +++ b/drivers/gpu/drm/panthor/panthor_heap.c @@ -97,6 +97,9 @@ struct panthor_heap_pool {
/** @gpu_contexts: Buffer object containing the GPU heap contexts. */ struct panthor_kernel_bo *gpu_contexts; + + /** @size: Size of all chunks across all heaps in the pool. */ + atomic_t size; };
static int panthor_heap_ctx_stride(struct panthor_device *ptdev) @@ -118,7 +121,7 @@ static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id) panthor_get_heap_ctx_offset(pool, id); }
-static void panthor_free_heap_chunk(struct panthor_vm *vm, +static void panthor_free_heap_chunk(struct panthor_heap_pool *pool, struct panthor_heap *heap, struct panthor_heap_chunk *chunk) { @@ -127,12 +130,13 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm, heap->chunk_count--; mutex_unlock(&heap->lock);
+ atomic_sub(heap->chunk_size, &pool->size); + panthor_kernel_bo_destroy(chunk->bo); kfree(chunk); }
-static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, - struct panthor_vm *vm, +static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool, struct panthor_heap *heap, bool initial_chunk) { @@ -144,7 +148,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, if (!chunk) return -ENOMEM;
- chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size, + chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC, PANTHOR_VM_KERNEL_AUTO_VA); @@ -180,6 +184,8 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, heap->chunk_count++; mutex_unlock(&heap->lock);
+ atomic_add(heap->chunk_size, &pool->size); + return 0;
err_destroy_bo: @@ -191,17 +197,16 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev, return ret; }
-static void panthor_free_heap_chunks(struct panthor_vm *vm, +static void panthor_free_heap_chunks(struct panthor_heap_pool *pool, struct panthor_heap *heap) { struct panthor_heap_chunk *chunk, *tmp;
list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) - panthor_free_heap_chunk(vm, heap, chunk); + panthor_free_heap_chunk(pool, heap, chunk); }
-static int panthor_alloc_heap_chunks(struct panthor_device *ptdev, - struct panthor_vm *vm, +static int panthor_alloc_heap_chunks(struct panthor_heap_pool *pool, struct panthor_heap *heap, u32 chunk_count) { @@ -209,7 +214,7 @@ static int panthor_alloc_heap_chunks(struct panthor_device *ptdev, u32 i;
for (i = 0; i < chunk_count; i++) { - ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true); + ret = panthor_alloc_heap_chunk(pool, heap, true); if (ret) return ret; } @@ -226,7 +231,7 @@ panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle) if (!heap) return -EINVAL;
- panthor_free_heap_chunks(pool->vm, heap); + panthor_free_heap_chunks(pool, heap); mutex_destroy(&heap->lock); kfree(heap); return 0; @@ -308,8 +313,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool, heap->max_chunks = max_chunks; heap->target_in_flight = target_in_flight;
- ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap, - initial_chunk_count); + ret = panthor_alloc_heap_chunks(pool, heap, initial_chunk_count); if (ret) goto err_free_heap;
@@ -342,7 +346,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool, return id;
err_free_heap: - panthor_free_heap_chunks(pool->vm, heap); + panthor_free_heap_chunks(pool, heap); mutex_destroy(&heap->lock); kfree(heap);
@@ -389,6 +393,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool, removed = chunk; list_del(&chunk->node); heap->chunk_count--; + atomic_sub(heap->chunk_size, &pool->size); break; } } @@ -466,7 +471,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool, * further jobs in this queue fail immediately instead of having to * wait for the job timeout. */ - ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false); + ret = panthor_alloc_heap_chunk(pool, heap, false); if (ret) goto out_unlock;
@@ -560,6 +565,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm) if (ret) goto err_destroy_pool;
+ atomic_add(pool->gpu_contexts->obj->size, &pool->size); + return pool;
err_destroy_pool: @@ -594,8 +601,10 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool) xa_for_each(&pool->xa, i, heap) drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
- if (!IS_ERR_OR_NULL(pool->gpu_contexts)) + if (!IS_ERR_OR_NULL(pool->gpu_contexts)) { + atomic_sub(pool->gpu_contexts->obj->size, &pool->size); panthor_kernel_bo_destroy(pool->gpu_contexts); + }
/* Reflects the fact the pool has been destroyed. */ pool->vm = NULL; @@ -603,3 +612,18 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
panthor_heap_pool_put(pool); } + +/** + * panthor_heap_pool_size() - Get a heap pool's total size + * @pool: Pool whose total chunks size to return + * + * Returns the aggregated size of all chunks for all heaps in the pool + * + */ +size_t panthor_heap_pool_size(struct panthor_heap_pool *pool) +{ + if (!pool) + return 0; + + return atomic_read(&pool->size); +} diff --git a/drivers/gpu/drm/panthor/panthor_heap.h b/drivers/gpu/drm/panthor/panthor_heap.h index 25a5f2bba445..e3358d4e8edb 100644 --- a/drivers/gpu/drm/panthor/panthor_heap.h +++ b/drivers/gpu/drm/panthor/panthor_heap.h @@ -27,6 +27,8 @@ struct panthor_heap_pool * panthor_heap_pool_get(struct panthor_heap_pool *pool); void panthor_heap_pool_put(struct panthor_heap_pool *pool);
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool); + int panthor_heap_grow(struct panthor_heap_pool *pool, u64 heap_gpu_va, u32 renderpasses_in_flight, diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index c39e3eb1c15d..1202de8811c2 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -1941,6 +1941,33 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c return pool; }
+/** + * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all + * heaps over all the heap pools in a VM + * @pfile: File. + * @stats: Memory stats to be updated. + * + * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM + * is active, record the size as active as well. + */ +void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats) +{ + struct panthor_vm *vm; + unsigned long i; + + if (!pfile->vms) + return; + + xa_lock(&pfile->vms->xa); + xa_for_each(&pfile->vms->xa, i, vm) { + size_t size = panthor_heap_pool_size(vm->heaps.pool); + stats->resident += size; + if (vm->as.id >= 0) + stats->active += size; + } + xa_unlock(&pfile->vms->xa); +} + static u64 mair_to_memattr(u64 mair, bool coherent) { u64 memattr = 0; diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h index 8d21e83d8aba..fc274637114e 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.h +++ b/drivers/gpu/drm/panthor/panthor_mmu.h @@ -9,6 +9,7 @@
struct drm_exec; struct drm_sched_job; +struct drm_memory_stats; struct panthor_gem_object; struct panthor_heap_pool; struct panthor_vm; @@ -37,6 +38,8 @@ int panthor_vm_flush_all(struct panthor_vm *vm); struct panthor_heap_pool * panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats); + struct panthor_vm *panthor_vm_get(struct panthor_vm *vm); void panthor_vm_put(struct panthor_vm *vm); struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu, diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 77b184c3fb0c..b8dbeb1586f6 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -9,6 +9,7 @@ #include <drm/panthor_drm.h>
#include <linux/build_bug.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> @@ -628,16 +629,19 @@ struct panthor_group { */ struct panthor_kernel_bo *syncobjs;
- /** @fdinfo: Per-file total cycle and timestamp values reference. */ + /** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */ struct { /** @data: Total sampled values for jobs in queues from this group. */ struct panthor_gpu_usage data;
/** - * @lock: Mutex to govern concurrent access from drm file's fdinfo callback - * and job post-completion processing function + * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo + * callback and job post-completion processing function */ - struct mutex lock; + spinlock_t lock; + + /** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */ + size_t kbo_sizes; } fdinfo;
/** @state: Group state. */ @@ -910,8 +914,6 @@ static void group_release_work(struct work_struct *work) release_work); u32 i;
- mutex_destroy(&group->fdinfo.lock); - for (i = 0; i < group->queue_count; i++) group_free_queue(group, group->queues[i]);
@@ -2861,12 +2863,12 @@ static void update_fdinfo_stats(struct panthor_job *job) struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap; struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
- mutex_lock(&group->fdinfo.lock); - if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) - fdinfo->cycles += data->cycles.after - data->cycles.before; - if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) - fdinfo->time += data->time.after - data->time.before; - mutex_unlock(&group->fdinfo.lock); + scoped_guard(spinlock, &group->fdinfo.lock) { + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES) + fdinfo->cycles += data->cycles.after - data->cycles.before; + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) + fdinfo->time += data->time.after - data->time.before; + } }
void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) @@ -2878,14 +2880,15 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) if (IS_ERR_OR_NULL(gpool)) return;
+ xa_lock(&gpool->xa); xa_for_each(&gpool->xa, i, group) { - mutex_lock(&group->fdinfo.lock); + guard(spinlock)(&group->fdinfo.lock); pfile->stats.cycles += group->fdinfo.data.cycles; pfile->stats.time += group->fdinfo.data.time; group->fdinfo.data.cycles = 0; group->fdinfo.data.time = 0; - mutex_unlock(&group->fdinfo.lock); } + xa_unlock(&gpool->xa); }
static void group_sync_upd_work(struct work_struct *work) @@ -3381,6 +3384,29 @@ group_create_queue(struct panthor_group *group, return ERR_PTR(ret); }
+static void add_group_kbo_sizes(struct panthor_device *ptdev, + struct panthor_group *group) +{ + struct panthor_queue *queue; + int i; + + if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group))) + return; + if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev)) + return; + + group->fdinfo.kbo_sizes += group->suspend_buf->obj->size; + group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size; + group->fdinfo.kbo_sizes += group->syncobjs->obj->size; + + for (i = 0; i < group->queue_count; i++) { + queue = group->queues[i]; + group->fdinfo.kbo_sizes += queue->ringbuf->obj->size; + group->fdinfo.kbo_sizes += queue->iface.mem->obj->size; + group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size; + } +} + #define MAX_GROUPS_PER_POOL 128
int panthor_group_create(struct panthor_file *pfile, @@ -3505,7 +3531,8 @@ int panthor_group_create(struct panthor_file *pfile, } mutex_unlock(&sched->reset.lock);
- mutex_init(&group->fdinfo.lock); + add_group_kbo_sizes(group->ptdev, group); + spin_lock_init(&group->fdinfo.lock);
return gid;
@@ -3624,6 +3651,33 @@ void panthor_group_pool_destroy(struct panthor_file *pfile) pfile->groups = NULL; }
+/** + * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BO's + * belonging to all the groups owned by an open Panthor file + * @pfile: File. + * @stats: Memory statistics to be updated. + * + */ +void +panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile, + struct drm_memory_stats *stats) +{ + struct panthor_group_pool *gpool = pfile->groups; + struct panthor_group *group; + unsigned long i; + + if (IS_ERR_OR_NULL(gpool)) + return; + + xa_lock(&gpool->xa); + xa_for_each(&gpool->xa, i, group) { + stats->resident += group->fdinfo.kbo_sizes; + if (group->csg_id >= 0) + stats->active += group->fdinfo.kbo_sizes; + } + xa_unlock(&gpool->xa); +} + static void job_release(struct kref *ref) { struct panthor_job *job = container_of(ref, struct panthor_job, refcount); diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h index 5ae6b4bde7c5..e650a445cf50 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.h +++ b/drivers/gpu/drm/panthor/panthor_sched.h @@ -9,6 +9,7 @@ struct dma_fence; struct drm_file; struct drm_gem_object; struct drm_sched_job; +struct drm_memory_stats; struct drm_panthor_group_create; struct drm_panthor_queue_create; struct drm_panthor_group_get_state; @@ -36,6 +37,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
int panthor_group_pool_create(struct panthor_file *pfile); void panthor_group_pool_destroy(struct panthor_file *pfile); +void panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile, + struct drm_memory_stats *stats);
int panthor_sched_init(struct panthor_device *ptdev); void panthor_sched_unplug(struct panthor_device *ptdev); diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c index 08334be38694..7c935870f7d2 100644 --- a/drivers/gpu/drm/solomon/ssd130x-spi.c +++ b/drivers/gpu/drm/solomon/ssd130x-spi.c @@ -151,7 +151,6 @@ static const struct of_device_id ssd130x_of_match[] = { }; MODULE_DEVICE_TABLE(of, ssd130x_of_match);
-#if IS_MODULE(CONFIG_DRM_SSD130X_SPI) /* * The SPI core always reports a MODALIAS uevent of the form "spi:<dev>", even * if the device was registered via OF. This means that the module will not be @@ -160,7 +159,7 @@ MODULE_DEVICE_TABLE(of, ssd130x_of_match); * To workaround this issue, add a SPI device ID table. Even when this should * not be needed for this driver to match the registered SPI devices. */ -static const struct spi_device_id ssd130x_spi_table[] = { +static const struct spi_device_id ssd130x_spi_id[] = { /* ssd130x family */ { "sh1106", SH1106_ID }, { "ssd1305", SSD1305_ID }, @@ -175,14 +174,14 @@ static const struct spi_device_id ssd130x_spi_table[] = { { "ssd1331", SSD1331_ID }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(spi, ssd130x_spi_table); -#endif +MODULE_DEVICE_TABLE(spi, ssd130x_spi_id);
static struct spi_driver ssd130x_spi_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = ssd130x_of_match, }, + .id_table = ssd130x_spi_id, .probe = ssd130x_spi_probe, .remove = ssd130x_spi_remove, .shutdown = ssd130x_spi_shutdown, diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index b777690fd660..dd2006d51c7a 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -880,7 +880,7 @@ static int ssd132x_update_rect(struct ssd130x_device *ssd130x, u8 n1 = buf[i * width + j]; u8 n2 = buf[i * width + j + 1];
- data_array[array_idx++] = (n2 << 4) | n1; + data_array[array_idx++] = (n2 & 0xf0) | (n1 >> 4); } }
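For clarity, the corrected packing above keeps only the upper nibble of each 8-bit greyscale sample: the first pixel lands in the low nibble and the second in the high nibble of the output byte. A minimal illustrative sketch (the helper name is made up, not part of the driver):

static inline u8 ssd132x_pack_gray4_sketch(u8 n1, u8 n2)
{
	/* n1 -> low nibble, n2 -> high nibble; only the top 4 bits of each survive */
	return (n2 & 0xf0) | (n1 >> 4);
}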
@@ -1037,7 +1037,7 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb, struct drm_format_conv_state *fmtcnv_state) { struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev); - unsigned int dst_pitch = drm_rect_width(rect); + unsigned int dst_pitch; struct iosys_map dst; int ret = 0;
@@ -1046,6 +1046,8 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb, rect->x2 = min_t(unsigned int, round_up(rect->x2, SSD132X_SEGMENT_WIDTH), ssd130x->width);
+ dst_pitch = drm_rect_width(rect); + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); if (ret) return ret; diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index e0409aba9349..7fefef690ab6 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -244,17 +244,19 @@ static int __init vkms_init(void) if (!config) return -ENOMEM;
- default_config = config; - config->cursor = enable_cursor; config->writeback = enable_writeback; config->overlay = enable_overlay;
ret = vkms_create(config); - if (ret) + if (ret) { kfree(config); + return ret; + }
- return ret; + default_config = config; + + return 0; }
static void vkms_destroy(struct vkms_config *config) @@ -278,9 +280,10 @@ static void vkms_destroy(struct vkms_config *config)
static void __exit vkms_exit(void) { - if (default_config->dev) - vkms_destroy(default_config); + if (!default_config) + return;
+ vkms_destroy(default_config); kfree(default_config); }
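Taken together, the vkms hunks above switch module init to a publish-on-success pattern. Roughly, assembled from the hunks and their surrounding context (a sketch, not a literal quote of the resulting file):

static int __init vkms_init(void)
{
	struct vkms_config *config;
	int ret;

	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->cursor = enable_cursor;
	config->writeback = enable_writeback;
	config->overlay = enable_overlay;

	ret = vkms_create(config);
	if (ret) {
		kfree(config);		/* never published, safe to free */
		return ret;
	}

	default_config = config;	/* publish only after success */
	return 0;
}

static void __exit vkms_exit(void)
{
	if (!default_config)		/* vkms_init() failed or never ran */
		return;

	vkms_destroy(default_config);
	kfree(default_config);
}

This removes the window in which default_config pointed at a configuration that vkms_create() had already failed to bring up.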
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index b51a2bde73e2..dcf6583a4c52 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -52,7 +52,7 @@ config DRM_XE config DRM_XE_DISPLAY bool "Enable display support" depends on DRM_XE && DRM_XE=m && HAS_IOPORT - select FB_IOMEM_HELPERS + select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION select I2C select I2C_ALGOBIT default y diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 979f6d3239ba..189a08cdc73c 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -2295,7 +2295,7 @@ static int zynqmp_dp_ignore_hpd_set(void *data, u64 val)
mutex_lock(&dp->lock); dp->ignore_hpd = val; - mutex_lock(&dp->lock); + mutex_unlock(&dp->lock); return 0; }
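The change above corrects a copy-pasted mutex_lock() that should have been mutex_unlock(), which made the function deadlock on its own lock. As an aside, the same function written with the scoped guard() helper from <linux/cleanup.h> (a sketch, not what the patch does) makes this class of bug impossible, since the lock is dropped automatically on every return path:

static int zynqmp_dp_ignore_hpd_set(void *data, u64 val)
{
	struct zynqmp_dp *dp = data;	/* assumed: the debugfs private data is the dp instance */

	guard(mutex)(&dp->lock);
	dp->ignore_hpd = val;

	return 0;
}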
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c index fa5f0ace6084..f07ff4eb3a6d 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c @@ -323,12 +323,16 @@ int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
audio->dai_name = devm_kasprintf(dev, GFP_KERNEL, "%s-dai", dev_name(dev)); + if (!audio->dai_name) + return -ENOMEM;
for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) { audio->link_names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s-dp-%u", dev_name(dev), i); audio->pcm_names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s-pcm-%u", dev_name(dev), i); + if (!audio->link_names[i] || !audio->pcm_names[i]) + return -ENOMEM; }
audio->base = devm_platform_ioremap_resource_byname(pdev, "aud"); diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index f953ca48a930..3a9544b97bc5 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -201,6 +201,8 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) if (ret) return ret;
+ dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); + /* Try the reserved memory. Proceed if there's none. */ of_reserved_mem_device_init(&pdev->dev);
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c index 473ac3f2d382..da31f1131afc 100644 --- a/drivers/greybus/gb-beagleplay.c +++ b/drivers/greybus/gb-beagleplay.c @@ -912,7 +912,9 @@ static enum fw_upload_err cc1352_prepare(struct fw_upload *fw_upload, cc1352_bootloader_reset(bg); WRITE_ONCE(bg->flashing_mode, false); msleep(200); - gb_greybus_init(bg); + if (gb_greybus_init(bg) < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_RW_ERROR, + "Failed to initialize greybus"); gb_beagleplay_start_svc(bg); return FW_UPLOAD_ERR_FW_INVALID; } diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 482b096eea28..0abfe51704a0 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -166,7 +166,6 @@ obj-$(CONFIG_USB_KBD) += usbhid/ obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/ -obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index 275cc0d9f505..3378bb77e6b4 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -269,7 +269,7 @@ catu_init_sg_table(struct device *catu_dev, int node, * Each table can address upto 1MB and we can have * CATU_PAGES_PER_SYSPAGE tables in a system page. */ - nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE; + nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M); catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages, size >> PAGE_SHIFT, pages); if (IS_ERR(catu_table)) diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 0a9380350fb5..4936dc2f7a56 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1092,18 +1092,20 @@ static void coresight_remove_conns(struct coresight_device *csdev) }
/** - * coresight_timeout - loop until a bit has changed to a specific register - * state. + * coresight_timeout_action - loop until a bit has changed to a specific register + * state, with a callback after every trial. * @csa: coresight device access for the device * @offset: Offset of the register from the base of the device. * @position: the position of the bit of interest. * @value: the value the bit should have. + * @cb: Call back after each trial. * * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if * TIMEOUT_US has elapsed, which ever happens first. */ -int coresight_timeout(struct csdev_access *csa, u32 offset, - int position, int value) +int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb) { int i; u32 val; @@ -1119,7 +1121,8 @@ int coresight_timeout(struct csdev_access *csa, u32 offset, if (!(val & BIT(position))) return 0; } - + if (cb) + cb(csa, offset, position, value); /* * Delay is arbitrary - the specification doesn't say how long * we are expected to wait. Extra check required to make sure @@ -1131,6 +1134,13 @@ int coresight_timeout(struct csdev_access *csa, u32 offset,
return -EAGAIN; } +EXPORT_SYMBOL_GPL(coresight_timeout_action); + +int coresight_timeout(struct csdev_access *csa, u32 offset, + int position, int value) +{ + return coresight_timeout_action(csa, offset, position, value, NULL); +} EXPORT_SYMBOL_GPL(coresight_timeout);

u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset) diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 2c1a60577728..5bda265d0234 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -428,6 +428,29 @@ static void etm4_check_arch_features(struct etmv4_drvdata *drvdata, } #endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
+static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val) +{ + if (!csa->io_mem) + isb(); +} + +/* + * etm4x_wait_status: Poll for TRCSTATR.<pos> == <val>. While using system + * instruction to access the trace unit, each access must be separated by a + * synchronization barrier. See ARM IHI0064H.b section "4.3.7 Synchronization of + * register updates", for system instructions section, in "Notes": + * + * "In particular, whenever disabling or enabling the trace unit, a poll of + * TRCSTATR needs explicit synchronization between each read of TRCSTATR" + */ +static int etm4x_wait_status(struct csdev_access *csa, int pos, int val) +{ + if (!csa->io_mem) + return coresight_timeout_action(csa, TRCSTATR, pos, val, + etm4x_sys_ins_barrier); + return coresight_timeout(csa, TRCSTATR, pos, val); +} + static int etm4_enable_hw(struct etmv4_drvdata *drvdata) { int i, rc; @@ -459,7 +482,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb();
/* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); if (drvdata->nr_pe) @@ -552,7 +575,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb();
/* wait for TRCSTATR.IDLE to go back down to '0' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n");
@@ -941,10 +964,25 @@ static void etm4_disable_hw(void *info) tsb_csync(); etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
+ /* + * As recommended by section 4.3.7 ("Synchronization when using system + * instructions to program the trace unit") of ARM IHI 0064H.b, the + * self-hosted trace analyzer must perform a Context synchronization + * event between writing to the TRCPRGCTLR and reading the TRCSTATR. + */ + if (!csa->io_mem) + isb(); + /* wait for TRCSTATR.PMSTABLE to go to '1' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for PM stable Trace Status\n"); + /* + * As recommended by section 4.3.7 (Synchronization of register updates) + * of ARM IHI 0064H.b. + */ + isb(); + /* read the status of the single shot comparators */ for (i = 0; i < drvdata->nr_ss_cmp; i++) { config->ss_status[i] = @@ -1746,7 +1784,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) etm4_os_lock(drvdata);
/* wait for TRCSTATR.PMSTABLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for PM Stable Status\n"); etm4_os_unlock(drvdata); @@ -1837,7 +1875,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); etm4_os_unlock(drvdata); diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index d6057d8c7dec..ecc07c17f4c7 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -1037,7 +1037,7 @@ static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
/* Create the IBIRULES register for both cases */ i3c_bus_for_each_i3cdev(&master->base.bus, dev) { - if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER) + if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP)) continue;
if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) { diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 962d289065ab..1b2014c4c4b4 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -712,7 +712,7 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct mma8452_data *data = iio_priv(indio_dev); - int i, ret; + int i, j, ret;
ret = iio_device_claim_direct_mode(indio_dev); if (ret) @@ -772,14 +772,18 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO: - ret = mma8452_get_odr_index(data); + j = mma8452_get_odr_index(data);
for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) { - if (mma8452_os_ratio[i][ret] == val) { + if (mma8452_os_ratio[i][j] == val) { ret = mma8452_set_power_mode(data, i); break; } } + if (i == ARRAY_SIZE(mma8452_os_ratio)) { + ret = -EINVAL; + break; + } break; default: ret = -EINVAL; diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c index e7fb860f3233..c2b05d1f7239 100644 --- a/drivers/iio/accel/msa311.c +++ b/drivers/iio/accel/msa311.c @@ -594,23 +594,25 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev, __le16 axis; int err;
- err = pm_runtime_resume_and_get(dev); + err = iio_device_claim_direct_mode(indio_dev); if (err) return err;
- err = iio_device_claim_direct_mode(indio_dev); - if (err) + err = pm_runtime_resume_and_get(dev); + if (err) { + iio_device_release_direct_mode(indio_dev); return err; + }
mutex_lock(&msa311->lock); err = msa311_get_axis(msa311, chan, &axis); mutex_unlock(&msa311->lock);
- iio_device_release_direct_mode(indio_dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev);
+ iio_device_release_direct_mode(indio_dev); + if (err) { dev_err(dev, "can't get axis %s (%pe)\n", chan->datasheet_name, ERR_PTR(err)); @@ -756,10 +758,6 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) unsigned int odr; int err;
- err = pm_runtime_resume_and_get(dev); - if (err) - return err; - /* * Sampling frequency changing is prohibited when buffer mode is * enabled, because sometimes MSA311 chip returns outliers during @@ -769,6 +767,12 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) if (err) return err;
+ err = pm_runtime_resume_and_get(dev); + if (err) { + iio_device_release_direct_mode(indio_dev); + return err; + } + err = -EINVAL; for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++) if (val == msa311_odr_table[odr].integral && @@ -779,11 +783,11 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) break; }
- iio_device_release_direct_mode(indio_dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev);
+ iio_device_release_direct_mode(indio_dev); + if (err) dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err));
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c index de32cc9d18c5..712f95f53c9e 100644 --- a/drivers/iio/adc/ad4130.c +++ b/drivers/iio/adc/ad4130.c @@ -223,6 +223,10 @@ enum ad4130_pin_function { AD4130_PIN_FN_VBIAS = BIT(3), };
+/* + * If you make adaptations in this struct, you most likely also have to adapt + * ad4130_setup_info_eq(), too. + */ struct ad4130_setup_info { unsigned int iout0_val; unsigned int iout1_val; @@ -591,6 +595,40 @@ static irqreturn_t ad4130_irq_handler(int irq, void *private) return IRQ_HANDLED; }
+static bool ad4130_setup_info_eq(struct ad4130_setup_info *a, + struct ad4130_setup_info *b) +{ + /* + * This is just to make sure that the comparison is adapted after + * struct ad4130_setup_info was changed. + */ + static_assert(sizeof(*a) == + sizeof(struct { + unsigned int iout0_val; + unsigned int iout1_val; + unsigned int burnout; + unsigned int pga; + unsigned int fs; + u32 ref_sel; + enum ad4130_filter_mode filter_mode; + bool ref_bufp; + bool ref_bufm; + })); + + if (a->iout0_val != b->iout0_val || + a->iout1_val != b->iout1_val || + a->burnout != b->burnout || + a->pga != b->pga || + a->fs != b->fs || + a->ref_sel != b->ref_sel || + a->filter_mode != b->filter_mode || + a->ref_bufp != b->ref_bufp || + a->ref_bufm != b->ref_bufm) + return false; + + return true; +} + static int ad4130_find_slot(struct ad4130_state *st, struct ad4130_setup_info *target_setup_info, unsigned int *slot, bool *overwrite) @@ -604,8 +642,7 @@ static int ad4130_find_slot(struct ad4130_state *st, struct ad4130_slot_info *slot_info = &st->slots_info[i];
/* Immediately accept a matching setup info. */ - if (!memcmp(target_setup_info, &slot_info->setup, - sizeof(*target_setup_info))) { + if (ad4130_setup_info_eq(target_setup_info, &slot_info->setup)) { *slot = i; return 0; } diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 6ae27cdd3250..de90ecb5f630 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -151,7 +151,11 @@ struct ad7124_chip_info { struct ad7124_channel_config { bool live; unsigned int cfg_slot; - /* Following fields are used to compare equality. */ + /* + * Following fields are used to compare for equality. If you + * make adaptations in it, you most likely also have to adapt + * ad7124_find_similar_live_cfg(), too. + */ struct_group(config_props, enum ad7124_ref_sel refsel; bool bipolar; @@ -338,15 +342,38 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_ struct ad7124_channel_config *cfg) { struct ad7124_channel_config *cfg_aux; - ptrdiff_t cmp_size; int i;
- cmp_size = sizeof_field(struct ad7124_channel_config, config_props); + /* + * This is just to make sure that the comparison is adapted after + * struct ad7124_channel_config was changed. + */ + static_assert(sizeof_field(struct ad7124_channel_config, config_props) == + sizeof(struct { + enum ad7124_ref_sel refsel; + bool bipolar; + bool buf_positive; + bool buf_negative; + unsigned int vref_mv; + unsigned int pga_bits; + unsigned int odr; + unsigned int odr_sel_bits; + unsigned int filter_type; + })); + for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg;
if (cfg_aux->live && - !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size)) + cfg->refsel == cfg_aux->refsel && + cfg->bipolar == cfg_aux->bipolar && + cfg->buf_positive == cfg_aux->buf_positive && + cfg->buf_negative == cfg_aux->buf_negative && + cfg->vref_mv == cfg_aux->vref_mv && + cfg->pga_bits == cfg_aux->pga_bits && + cfg->odr == cfg_aux->odr && + cfg->odr_sel_bits == cfg_aux->odr_sel_bits && + cfg->filter_type == cfg_aux->filter_type) return cfg_aux; }
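The ad4130 and ad7124 changes above, and the ad7173 change further down, all apply the same idea: memcmp() over a config struct may compare padding bytes and report spurious mismatches, so the comparison is spelled out field by field, and a static_assert() on the struct size forces whoever adds a field to revisit the comparison. A standalone sketch of the pattern, using a made-up struct:

struct example_cfg {
	unsigned int odr;
	bool bipolar;
};

static bool example_cfg_eq(const struct example_cfg *a,
			   const struct example_cfg *b)
{
	/* Build breaks if example_cfg grows without this function being updated. */
	static_assert(sizeof(*a) ==
		      sizeof(struct {
			      unsigned int odr;
			      bool bipolar;
		      }));

	return a->odr == b->odr && a->bipolar == b->bipolar;
}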
@@ -540,14 +567,21 @@ static int ad7124_append_status(struct ad_sigma_delta *sd, bool append) return 0; }
-static int ad7124_disable_all(struct ad_sigma_delta *sd) +static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan) { struct ad7124_state *st = container_of(sd, struct ad7124_state, sd); + + /* The relevant thing here is that AD7124_CHANNEL_EN_MSK is cleared. */ + return ad_sd_write_reg(&st->sd, AD7124_CHANNEL(chan), 2, 0); +} + +static int ad7124_disable_all(struct ad_sigma_delta *sd) +{ int ret; int i;
- for (i = 0; i < st->num_channels; i++) { - ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK, 0, 2); + for (i = 0; i < 16; i++) { + ret = ad7124_disable_one(sd, i); if (ret < 0) return ret; } @@ -555,13 +589,6 @@ static int ad7124_disable_all(struct ad_sigma_delta *sd) return 0; }
-static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan) -{ - struct ad7124_state *st = container_of(sd, struct ad7124_state, sd); - - return ad7124_spi_write_mask(st, AD7124_CHANNEL(chan), AD7124_CHANNEL_EN_MSK, 0, 2); -} - static const struct ad_sigma_delta_info ad7124_sigma_delta_info = { .set_channel = ad7124_set_channel, .append_status = ad7124_append_status, @@ -1016,11 +1043,10 @@ static int ad7124_setup(struct ad7124_state *st) * set all channels to this default value. */ ad7124_set_channel_odr(st, i, 10); - - /* Disable all channels to prevent unintended conversions. */ - ad_sd_write_reg(&st->sd, AD7124_CHANNEL(i), 2, 0); }
+ ad7124_disable_all(&st->sd); + ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control); if (ret < 0) return dev_err_probe(dev, ret, "Failed to setup CONTROL register\n"); diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index 6c4ed10ae580..4f8810e35a8d 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -189,7 +189,11 @@ struct ad7173_channel_config { u8 cfg_slot; bool live;
- /* Following fields are used to compare equality. */ + /* + * Following fields are used to compare equality. If you + * make adaptations in it, you most likely also have to adapt + * ad7173_find_live_config(), too. + */ struct_group(config_props, bool bipolar; bool input_buf; @@ -559,6 +563,9 @@ static ssize_t ad7173_write_syscalib(struct iio_dev *indio_dev, if (ret) return ret;
+ if (!iio_device_claim_direct(indio_dev)) + return -EBUSY; + mode = st->channels[chan->channel].syscalib_mode; if (sys_calib) { if (mode == AD7173_SYSCALIB_ZERO_SCALE) @@ -569,6 +576,8 @@ static ssize_t ad7173_write_syscalib(struct iio_dev *indio_dev, chan->address); }
+ iio_device_release_direct(indio_dev); + return ret ? : len; }
@@ -712,15 +721,28 @@ static struct ad7173_channel_config * ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg) { struct ad7173_channel_config *cfg_aux; - ptrdiff_t cmp_size; int i;
- cmp_size = sizeof_field(struct ad7173_channel_config, config_props); + /* + * This is just to make sure that the comparison is adapted after + * struct ad7173_channel_config was changed. + */ + static_assert(sizeof_field(struct ad7173_channel_config, config_props) == + sizeof(struct { + bool bipolar; + bool input_buf; + u8 odr; + u8 ref_sel; + })); + for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg;
if (cfg_aux->live && - !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size)) + cfg->bipolar == cfg_aux->bipolar && + cfg->input_buf == cfg_aux->input_buf && + cfg->odr == cfg_aux->odr && + cfg->ref_sel == cfg_aux->ref_sel) return cfg_aux; } return NULL; diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index cfaf8f7e0a07..1ebb738d99f5 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -256,6 +256,9 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev, if (ret) return ret;
+ if (!iio_device_claim_direct(indio_dev)) + return -EBUSY; + temp = st->syscalib_mode[chan->channel]; if (sys_calib) { if (temp == AD7192_SYSCALIB_ZERO_SCALE) @@ -266,6 +269,8 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev, chan->address); }
+ iio_device_release_direct(indio_dev); + return ret ? ret : len; }
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index 113703fb7245..6f8816483f1a 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -574,6 +574,21 @@ static int ad7768_probe(struct spi_device *spi) return -ENOMEM;
st = iio_priv(indio_dev); + /* + * Datasheet recommends SDI line to be kept high when data is not being + * clocked out of the controller and the spi clock is free running, + * to prevent accidental reset. + * Since many controllers do not support the SPI_MOSI_IDLE_HIGH flag + * yet, only request the MOSI idle state to enable if the controller + * supports it. + */ + if (spi->controller->mode_bits & SPI_MOSI_IDLE_HIGH) { + spi->mode |= SPI_MOSI_IDLE_HIGH; + ret = spi_setup(spi); + if (ret < 0) + return ret; + } + st->spi = spi;
st->vref = devm_regulator_get(&spi->dev, "vref"); diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index d5d81581ab34..77b4e8bc4748 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -339,6 +339,7 @@ int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, out: sigma_delta->keep_cs_asserted = false; ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); + ad_sigma_delta_disable_one(sigma_delta, channel); sigma_delta->bus_locked = false; spi_bus_unlock(sigma_delta->spi->controller);
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c index b143f7ed6847..ac871deb8063 100644 --- a/drivers/iio/dac/adi-axi-dac.c +++ b/drivers/iio/dac/adi-axi-dac.c @@ -585,6 +585,14 @@ static int axi_dac_ddr_disable(struct iio_backend *back) static int axi_dac_data_stream_enable(struct iio_backend *back) { struct axi_dac_state *st = iio_backend_get_priv(back); + int ret, val; + + ret = regmap_read_poll_timeout(st->regmap, + AXI_DAC_UI_STATUS_REG, val, + FIELD_GET(AXI_DAC_UI_STATUS_IF_BUSY, val) == 0, + 10, 100 * KILO); + if (ret) + return ret;
return regmap_set_bits(st->regmap, AXI_DAC_CUSTOM_CTRL_REG, AXI_DAC_CUSTOM_CTRL_STREAM_ENABLE); diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c index 363281272035..aa2b8b38ab58 100644 --- a/drivers/iio/industrialio-backend.c +++ b/drivers/iio/industrialio-backend.c @@ -155,10 +155,12 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file, ssize_t rc; int ret;
- rc = simple_write_to_buffer(buf, sizeof(buf), ppos, userbuf, count); + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count); if (rc < 0) return rc;
+ buf[count] = '\0'; + ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
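The two added lines reserve one byte of the on-stack buffer for a terminator and NUL-terminate it before parsing, so sscanf() can no longer scan past the copied data. The same pattern in isolation (a hypothetical handler, here terminating at the number of bytes simple_write_to_buffer() actually copied):

static ssize_t example_debugfs_write(struct file *file, const char __user *userbuf,
				     size_t count, loff_t *ppos)
{
	char buf[80];
	unsigned int addr, val;
	ssize_t rc;

	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
	if (rc < 0)
		return rc;

	buf[rc] = '\0';		/* terminate exactly what was copied */

	if (sscanf(buf, "%i %i", &addr, &val) != 2)
		return -EINVAL;

	return count;
}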
switch (ret) { diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c index d70ebe3bf774..d14f3507f34e 100644 --- a/drivers/iio/industrialio-gts-helper.c +++ b/drivers/iio/industrialio-gts-helper.c @@ -950,7 +950,15 @@ int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int, } EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_time_sel_for_scale, "IIO_GTS_HELPER");
-static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time) +/** + * iio_gts_get_total_gain - Fetch total gain for given HW-gain and time + * @gts: Gain time scale descriptor + * @gain: HW-gain for which the total gain is searched for + * @time: Integration time for which the total gain is searched for + * + * Return: total gain on success and -EINVAL on error. + */ +int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time) { const struct iio_itime_sel_mul *itime;
@@ -966,6 +974,7 @@ static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
return gain * itime->mul; } +EXPORT_SYMBOL_NS_GPL(iio_gts_get_total_gain, "IIO_GTS_HELPER");
static int iio_gts_get_scale_linear(struct iio_gts *gts, int gain, int time, u64 *scale) diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index e34e551eef3e..eb7f56eaeae0 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -683,6 +683,7 @@ config VEML6030 select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER + select IIO_GTS_HELPER depends on I2C help Say Y here if you want to build a driver for the Vishay VEML6030 diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c index 9b71825eea9b..750d3c2267a4 100644 --- a/drivers/iio/light/veml6030.c +++ b/drivers/iio/light/veml6030.c @@ -24,10 +24,12 @@ #include <linux/regmap.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> +#include <linux/units.h> #include <linux/regulator/consumer.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/events.h> +#include <linux/iio/iio-gts-helper.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h>
@@ -59,22 +61,36 @@ #define VEML6035_INT_CHAN BIT(3) #define VEML6035_CHAN_EN BIT(2)
+/* Regfields */ +#define VEML6030_GAIN_RF REG_FIELD(VEML6030_REG_ALS_CONF, 11, 12) +#define VEML6030_IT_RF REG_FIELD(VEML6030_REG_ALS_CONF, 6, 9) + +#define VEML6035_GAIN_RF REG_FIELD(VEML6030_REG_ALS_CONF, 10, 12) + +/* Maximum scales x 10000 to work with integers */ +#define VEML6030_MAX_SCALE 21504 +#define VEML6035_MAX_SCALE 4096 + enum veml6030_scan { VEML6030_SCAN_ALS, VEML6030_SCAN_WH, VEML6030_SCAN_TIMESTAMP, };
+struct veml6030_rf { + struct regmap_field *it; + struct regmap_field *gain; +}; + struct veml603x_chip { const char *name; - const int(*scale_vals)[][2]; - const int num_scale_vals; const struct iio_chan_spec *channels; const int num_channels; + const struct reg_field gain_rf; + const struct reg_field it_rf; + const int max_scale; int (*hw_init)(struct iio_dev *indio_dev, struct device *dev); int (*set_info)(struct iio_dev *indio_dev); - int (*set_als_gain)(struct iio_dev *indio_dev, int val, int val2); - int (*get_als_gain)(struct iio_dev *indio_dev, int *val, int *val2); };
/* @@ -91,40 +107,56 @@ struct veml603x_chip { struct veml6030_data { struct i2c_client *client; struct regmap *regmap; - int cur_resolution; - int cur_gain; - int cur_integration_time; + struct veml6030_rf rf; const struct veml603x_chip *chip; + struct iio_gts gts; + };
-static const int veml6030_it_times[][2] = { - { 0, 25000 }, - { 0, 50000 }, - { 0, 100000 }, - { 0, 200000 }, - { 0, 400000 }, - { 0, 800000 }, +#define VEML6030_SEL_IT_25MS 0x0C +#define VEML6030_SEL_IT_50MS 0x08 +#define VEML6030_SEL_IT_100MS 0x00 +#define VEML6030_SEL_IT_200MS 0x01 +#define VEML6030_SEL_IT_400MS 0x02 +#define VEML6030_SEL_IT_800MS 0x03 +static const struct iio_itime_sel_mul veml6030_it_sel[] = { + GAIN_SCALE_ITIME_US(25000, VEML6030_SEL_IT_25MS, 1), + GAIN_SCALE_ITIME_US(50000, VEML6030_SEL_IT_50MS, 2), + GAIN_SCALE_ITIME_US(100000, VEML6030_SEL_IT_100MS, 4), + GAIN_SCALE_ITIME_US(200000, VEML6030_SEL_IT_200MS, 8), + GAIN_SCALE_ITIME_US(400000, VEML6030_SEL_IT_400MS, 16), + GAIN_SCALE_ITIME_US(800000, VEML6030_SEL_IT_800MS, 32), };
-/* - * Scale is 1/gain. Value 0.125 is ALS gain x (1/8), 0.25 is - * ALS gain x (1/4), 0.5 is ALS gain x (1/2), 1.0 is ALS gain x 1, - * 2.0 is ALS gain x2, and 4.0 is ALS gain x 4. +/* Gains are multiplied by 8 to work with integers. The values in the + * iio-gts tables don't need corrections because the maximum value of + * the scale refers to GAIN = x1, and the rest of the values are + * obtained from the resulting linear function. */ -static const int veml6030_scale_vals[][2] = { - { 0, 125000 }, - { 0, 250000 }, - { 1, 0 }, - { 2, 0 }, +#define VEML6030_SEL_MILLI_GAIN_X125 2 +#define VEML6030_SEL_MILLI_GAIN_X250 3 +#define VEML6030_SEL_MILLI_GAIN_X1000 0 +#define VEML6030_SEL_MILLI_GAIN_X2000 1 +static const struct iio_gain_sel_pair veml6030_gain_sel[] = { + GAIN_SCALE_GAIN(1, VEML6030_SEL_MILLI_GAIN_X125), + GAIN_SCALE_GAIN(2, VEML6030_SEL_MILLI_GAIN_X250), + GAIN_SCALE_GAIN(8, VEML6030_SEL_MILLI_GAIN_X1000), + GAIN_SCALE_GAIN(16, VEML6030_SEL_MILLI_GAIN_X2000), };
-static const int veml6035_scale_vals[][2] = { - { 0, 125000 }, - { 0, 250000 }, - { 0, 500000 }, - { 1, 0 }, - { 2, 0 }, - { 4, 0 }, +#define VEML6035_SEL_MILLI_GAIN_X125 4 +#define VEML6035_SEL_MILLI_GAIN_X250 5 +#define VEML6035_SEL_MILLI_GAIN_X500 7 +#define VEML6035_SEL_MILLI_GAIN_X1000 0 +#define VEML6035_SEL_MILLI_GAIN_X2000 1 +#define VEML6035_SEL_MILLI_GAIN_X4000 3 +static const struct iio_gain_sel_pair veml6035_gain_sel[] = { + GAIN_SCALE_GAIN(1, VEML6035_SEL_MILLI_GAIN_X125), + GAIN_SCALE_GAIN(2, VEML6035_SEL_MILLI_GAIN_X250), + GAIN_SCALE_GAIN(4, VEML6035_SEL_MILLI_GAIN_X500), + GAIN_SCALE_GAIN(8, VEML6035_SEL_MILLI_GAIN_X1000), + GAIN_SCALE_GAIN(16, VEML6035_SEL_MILLI_GAIN_X2000), + GAIN_SCALE_GAIN(32, VEML6035_SEL_MILLI_GAIN_X4000), };
/* @@ -327,105 +359,73 @@ static const struct regmap_config veml6030_regmap_config = { .val_format_endian = REGMAP_ENDIAN_LITTLE, };
-static int veml6030_get_intgrn_tm(struct iio_dev *indio_dev, - int *val, int *val2) +static int veml6030_get_it(struct veml6030_data *data, int *val, int *val2) { - int ret, reg; - struct veml6030_data *data = iio_priv(indio_dev); + int ret, it_idx;
- ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg); - if (ret) { - dev_err(&data->client->dev, - "can't read als conf register %d\n", ret); + ret = regmap_field_read(data->rf.it, &it_idx); + if (ret) return ret; - }
- switch ((reg >> 6) & 0xF) { - case 0: - *val2 = 100000; - break; - case 1: - *val2 = 200000; - break; - case 2: - *val2 = 400000; - break; - case 3: - *val2 = 800000; - break; - case 8: - *val2 = 50000; - break; - case 12: - *val2 = 25000; - break; - default: - return -EINVAL; - } + ret = iio_gts_find_int_time_by_sel(&data->gts, it_idx); + if (ret < 0) + return ret;
+ *val2 = ret; *val = 0; + return IIO_VAL_INT_PLUS_MICRO; }
-static int veml6030_set_intgrn_tm(struct iio_dev *indio_dev, - int val, int val2) +static int veml6030_set_it(struct iio_dev *indio_dev, int val, int val2) { - int ret, new_int_time, int_idx; struct veml6030_data *data = iio_priv(indio_dev); + int ret, gain_idx, it_idx, new_gain, prev_gain, prev_it; + bool in_range;
- if (val) + if (val || !iio_gts_valid_time(&data->gts, val2)) return -EINVAL;
- switch (val2) { - case 25000: - new_int_time = 0x300; - int_idx = 5; - break; - case 50000: - new_int_time = 0x200; - int_idx = 4; - break; - case 100000: - new_int_time = 0x00; - int_idx = 3; - break; - case 200000: - new_int_time = 0x40; - int_idx = 2; - break; - case 400000: - new_int_time = 0x80; - int_idx = 1; - break; - case 800000: - new_int_time = 0xC0; - int_idx = 0; - break; - default: - return -EINVAL; - } + ret = regmap_field_read(data->rf.it, &it_idx); + if (ret) + return ret;
- ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF, - VEML6030_ALS_IT, new_int_time); - if (ret) { - dev_err(&data->client->dev, - "can't update als integration time %d\n", ret); + ret = regmap_field_read(data->rf.gain, &gain_idx); + if (ret) return ret; - }
- /* - * Cache current integration time and update resolution. For every - * increase in integration time to next level, resolution is halved - * and vice-versa. - */ - if (data->cur_integration_time < int_idx) - data->cur_resolution <<= int_idx - data->cur_integration_time; - else if (data->cur_integration_time > int_idx) - data->cur_resolution >>= data->cur_integration_time - int_idx; + prev_it = iio_gts_find_int_time_by_sel(&data->gts, it_idx); + if (prev_it < 0) + return prev_it; + + if (prev_it == val2) + return 0;
- data->cur_integration_time = int_idx; + prev_gain = iio_gts_find_gain_by_sel(&data->gts, gain_idx); + if (prev_gain < 0) + return prev_gain;
- return ret; + ret = iio_gts_find_new_gain_by_gain_time_min(&data->gts, prev_gain, prev_it, + val2, &new_gain, &in_range); + if (ret) + return ret; + + if (!in_range) + dev_dbg(&data->client->dev, "Optimal gain out of range\n"); + + ret = iio_gts_find_sel_by_int_time(&data->gts, val2); + if (ret < 0) + return ret; + + ret = regmap_field_write(data->rf.it, ret); + if (ret) + return ret; + + ret = iio_gts_find_sel_by_gain(&data->gts, new_gain); + if (ret < 0) + return ret; + + return regmap_field_write(data->rf.gain, ret); }
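A worked example of the rescaling done in veml6030_set_it(), using the tables introduced above: at gain x1 (table value 8) and 100 ms integration time (multiplier 4) the total gain is 8 * 4 = 32. Changing the integration time to 800 ms (multiplier 32) would multiply the total gain by 8, so iio_gts_find_new_gain_by_gain_time_min() looks for a hardware gain with table value 1 (1 * 32 = 32), which the VEML6030 table provides as x1/8; the driver then writes the new integration time and drops the gain to x1/8, leaving the reported scale unchanged.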
static int veml6030_read_persistence(struct iio_dev *indio_dev, @@ -434,7 +434,7 @@ static int veml6030_read_persistence(struct iio_dev *indio_dev, int ret, reg, period, x, y; struct veml6030_data *data = iio_priv(indio_dev);
- ret = veml6030_get_intgrn_tm(indio_dev, &x, &y); + ret = veml6030_get_it(data, &x, &y); if (ret < 0) return ret;
@@ -459,7 +459,7 @@ static int veml6030_write_persistence(struct iio_dev *indio_dev, int ret, period, x, y; struct veml6030_data *data = iio_priv(indio_dev);
- ret = veml6030_get_intgrn_tm(indio_dev, &x, &y); + ret = veml6030_get_it(data, &x, &y); if (ret < 0) return ret;
@@ -488,177 +488,29 @@ static int veml6030_write_persistence(struct iio_dev *indio_dev, return ret; }
-/* - * Cache currently set gain & update resolution. For every - * increase in the gain to next level, resolution is halved - * and vice-versa. - */ -static void veml6030_update_gain_res(struct veml6030_data *data, int gain_idx) -{ - if (data->cur_gain < gain_idx) - data->cur_resolution <<= gain_idx - data->cur_gain; - else if (data->cur_gain > gain_idx) - data->cur_resolution >>= data->cur_gain - gain_idx; - - data->cur_gain = gain_idx; -} - -static int veml6030_set_als_gain(struct iio_dev *indio_dev, - int val, int val2) +static int veml6030_set_scale(struct iio_dev *indio_dev, int val, int val2) { - int ret, new_gain, gain_idx; + int ret, gain_sel, it_idx, it_sel; struct veml6030_data *data = iio_priv(indio_dev);
- if (val == 0 && val2 == 125000) { - new_gain = 0x1000; /* 0x02 << 11 */ - gain_idx = 3; - } else if (val == 0 && val2 == 250000) { - new_gain = 0x1800; - gain_idx = 2; - } else if (val == 1 && val2 == 0) { - new_gain = 0x00; - gain_idx = 1; - } else if (val == 2 && val2 == 0) { - new_gain = 0x800; - gain_idx = 0; - } else { - return -EINVAL; - } - - ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF, - VEML6030_ALS_GAIN, new_gain); - if (ret) { - dev_err(&data->client->dev, - "can't set als gain %d\n", ret); + ret = regmap_field_read(data->rf.it, &it_idx); + if (ret) return ret; - }
- veml6030_update_gain_res(data, gain_idx); - - return 0; -} - -static int veml6035_set_als_gain(struct iio_dev *indio_dev, int val, int val2) -{ - int ret, new_gain, gain_idx; - struct veml6030_data *data = iio_priv(indio_dev); - - if (val == 0 && val2 == 125000) { - new_gain = VEML6035_SENS; - gain_idx = 5; - } else if (val == 0 && val2 == 250000) { - new_gain = VEML6035_SENS | VEML6035_GAIN; - gain_idx = 4; - } else if (val == 0 && val2 == 500000) { - new_gain = VEML6035_SENS | VEML6035_GAIN | - VEML6035_DG; - gain_idx = 3; - } else if (val == 1 && val2 == 0) { - new_gain = 0x0000; - gain_idx = 2; - } else if (val == 2 && val2 == 0) { - new_gain = VEML6035_GAIN; - gain_idx = 1; - } else if (val == 4 && val2 == 0) { - new_gain = VEML6035_GAIN | VEML6035_DG; - gain_idx = 0; - } else { - return -EINVAL; - } - - ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF, - VEML6035_GAIN_M, new_gain); - if (ret) { - dev_err(&data->client->dev, "can't set als gain %d\n", ret); + ret = iio_gts_find_gain_time_sel_for_scale(&data->gts, val, val2, + &gain_sel, &it_sel); + if (ret) return ret; - } - - veml6030_update_gain_res(data, gain_idx);
- return 0; -} - -static int veml6030_get_als_gain(struct iio_dev *indio_dev, - int *val, int *val2) -{ - int ret, reg; - struct veml6030_data *data = iio_priv(indio_dev); - - ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg); - if (ret) { - dev_err(&data->client->dev, - "can't read als conf register %d\n", ret); + ret = regmap_field_write(data->rf.it, it_sel); + if (ret) return ret; - } - - switch ((reg >> 11) & 0x03) { - case 0: - *val = 1; - *val2 = 0; - break; - case 1: - *val = 2; - *val2 = 0; - break; - case 2: - *val = 0; - *val2 = 125000; - break; - case 3: - *val = 0; - *val2 = 250000; - break; - default: - return -EINVAL; - } - - return IIO_VAL_INT_PLUS_MICRO; -} - -static int veml6035_get_als_gain(struct iio_dev *indio_dev, int *val, int *val2) -{ - int ret, reg; - struct veml6030_data *data = iio_priv(indio_dev);
- ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg); - if (ret) { - dev_err(&data->client->dev, - "can't read als conf register %d\n", ret); + ret = regmap_field_write(data->rf.gain, gain_sel); + if (ret) return ret; - }
- switch (FIELD_GET(VEML6035_GAIN_M, reg)) { - case 0: - *val = 1; - *val2 = 0; - break; - case 1: - case 2: - *val = 2; - *val2 = 0; - break; - case 3: - *val = 4; - *val2 = 0; - break; - case 4: - *val = 0; - *val2 = 125000; - break; - case 5: - case 6: - *val = 0; - *val2 = 250000; - break; - case 7: - *val = 0; - *val2 = 500000; - break; - default: - return -EINVAL; - } - - return IIO_VAL_INT_PLUS_MICRO; + return 0; }
static int veml6030_read_thresh(struct iio_dev *indio_dev, @@ -705,6 +557,71 @@ static int veml6030_write_thresh(struct iio_dev *indio_dev, return ret; }
+static int veml6030_get_total_gain(struct veml6030_data *data) +{ + int gain, it, reg, ret; + + ret = regmap_field_read(data->rf.gain, &reg); + if (ret) + return ret; + + gain = iio_gts_find_gain_by_sel(&data->gts, reg); + if (gain < 0) + return gain; + + ret = regmap_field_read(data->rf.it, &reg); + if (ret) + return ret; + + it = iio_gts_find_int_time_by_sel(&data->gts, reg); + if (it < 0) + return it; + + return iio_gts_get_total_gain(&data->gts, gain, it); +} + +static int veml6030_get_scale(struct veml6030_data *data, int *val, int *val2) +{ + int gain, it, reg, ret; + + ret = regmap_field_read(data->rf.gain, &reg); + if (ret) + return ret; + + gain = iio_gts_find_gain_by_sel(&data->gts, reg); + if (gain < 0) + return gain; + + ret = regmap_field_read(data->rf.it, &reg); + if (ret) + return ret; + + it = iio_gts_find_int_time_by_sel(&data->gts, reg); + if (it < 0) + return it; + + ret = iio_gts_get_scale(&data->gts, gain, it, val, val2); + if (ret) + return ret; + + return IIO_VAL_INT_PLUS_NANO; +} + +static int veml6030_process_als(struct veml6030_data *data, int raw, + int *val, int *val2) +{ + int total_gain; + + total_gain = veml6030_get_total_gain(data); + if (total_gain < 0) + return total_gain; + + *val = raw * data->chip->max_scale / total_gain / 10000; + *val2 = raw * data->chip->max_scale / total_gain % 10000 * 100; + + return IIO_VAL_INT_PLUS_MICRO; +} + /* * Provide both raw as well as light reading in lux. * light (in lux) = resolution * raw reading @@ -728,11 +645,9 @@ static int veml6030_read_raw(struct iio_dev *indio_dev, dev_err(dev, "can't read als data %d\n", ret); return ret; } - if (mask == IIO_CHAN_INFO_PROCESSED) { - *val = (reg * data->cur_resolution) / 10000; - *val2 = (reg * data->cur_resolution) % 10000 * 100; - return IIO_VAL_INT_PLUS_MICRO; - } + if (mask == IIO_CHAN_INFO_PROCESSED) + return veml6030_process_als(data, reg, val, val2); + *val = reg; return IIO_VAL_INT; case IIO_INTENSITY: @@ -747,9 +662,9 @@ static int veml6030_read_raw(struct iio_dev *indio_dev, return -EINVAL; } case IIO_CHAN_INFO_INT_TIME: - return veml6030_get_intgrn_tm(indio_dev, val, val2); + return veml6030_get_it(data, val, val2); case IIO_CHAN_INFO_SCALE: - return data->chip->get_als_gain(indio_dev, val, val2); + return veml6030_get_scale(data, val, val2); default: return -EINVAL; } @@ -764,15 +679,9 @@ static int veml6030_read_avail(struct iio_dev *indio_dev,
switch (mask) { case IIO_CHAN_INFO_INT_TIME: - *vals = (int *)&veml6030_it_times; - *length = 2 * ARRAY_SIZE(veml6030_it_times); - *type = IIO_VAL_INT_PLUS_MICRO; - return IIO_AVAIL_LIST; + return iio_gts_avail_times(&data->gts, vals, type, length); case IIO_CHAN_INFO_SCALE: - *vals = (int *)*data->chip->scale_vals; - *length = 2 * data->chip->num_scale_vals; - *type = IIO_VAL_INT_PLUS_MICRO; - return IIO_AVAIL_LIST; + return iio_gts_all_avail_scales(&data->gts, vals, type, length); }
return -EINVAL; @@ -782,13 +691,25 @@ static int veml6030_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { - struct veml6030_data *data = iio_priv(indio_dev); - switch (mask) { case IIO_CHAN_INFO_INT_TIME: - return veml6030_set_intgrn_tm(indio_dev, val, val2); + return veml6030_set_it(indio_dev, val, val2); + case IIO_CHAN_INFO_SCALE: + return veml6030_set_scale(indio_dev, val, val2); + default: + return -EINVAL; + } +} + +static int veml6030_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + long mask) +{ + switch (mask) { case IIO_CHAN_INFO_SCALE: - return data->chip->set_als_gain(indio_dev, val, val2); + return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_INT_TIME: + return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } @@ -886,6 +807,7 @@ static const struct iio_info veml6030_info = { .read_raw = veml6030_read_raw, .read_avail = veml6030_read_avail, .write_raw = veml6030_write_raw, + .write_raw_get_fmt = veml6030_write_raw_get_fmt, .read_event_value = veml6030_read_event_val, .write_event_value = veml6030_write_event_val, .read_event_config = veml6030_read_interrupt_config, @@ -897,6 +819,7 @@ static const struct iio_info veml6030_info_no_irq = { .read_raw = veml6030_read_raw, .read_avail = veml6030_read_avail, .write_raw = veml6030_write_raw, + .write_raw_get_fmt = veml6030_write_raw_get_fmt, };
static irqreturn_t veml6030_event_handler(int irq, void *private) @@ -990,6 +913,27 @@ static int veml7700_set_info(struct iio_dev *indio_dev) return 0; }
+static int veml6030_regfield_init(struct iio_dev *indio_dev) +{ + struct veml6030_data *data = iio_priv(indio_dev); + struct regmap *regmap = data->regmap; + struct device *dev = &data->client->dev; + struct regmap_field *rm_field; + struct veml6030_rf *rf = &data->rf; + + rm_field = devm_regmap_field_alloc(dev, regmap, data->chip->it_rf); + if (IS_ERR(rm_field)) + return PTR_ERR(rm_field); + rf->it = rm_field; + + rm_field = devm_regmap_field_alloc(dev, regmap, data->chip->gain_rf); + if (IS_ERR(rm_field)) + return PTR_ERR(rm_field); + rf->gain = rm_field; + + return 0; +} + /* * Set ALS gain to 1/8, integration time to 100 ms, PSM to mode 2, * persistence to 1 x integration time and the threshold @@ -1001,6 +945,13 @@ static int veml6030_hw_init(struct iio_dev *indio_dev, struct device *dev) int ret, val; struct veml6030_data *data = iio_priv(indio_dev);
+ ret = devm_iio_init_iio_gts(dev, 2, 150400000, + veml6030_gain_sel, ARRAY_SIZE(veml6030_gain_sel), + veml6030_it_sel, ARRAY_SIZE(veml6030_it_sel), + &data->gts); + if (ret) + return dev_err_probe(dev, ret, "failed to init iio gts\n"); + ret = veml6030_als_shut_down(data); if (ret) return dev_err_probe(dev, ret, "can't shutdown als\n"); @@ -1036,11 +987,6 @@ static int veml6030_hw_init(struct iio_dev *indio_dev, struct device *dev) return dev_err_probe(dev, ret, "can't clear als interrupt status\n");
- /* Cache currently active measurement parameters */ - data->cur_gain = 3; - data->cur_resolution = 5376; - data->cur_integration_time = 3; - return ret; }
@@ -1056,6 +1002,13 @@ static int veml6035_hw_init(struct iio_dev *indio_dev, struct device *dev) int ret, val; struct veml6030_data *data = iio_priv(indio_dev);
+ ret = devm_iio_init_iio_gts(dev, 0, 409600000, + veml6035_gain_sel, ARRAY_SIZE(veml6035_gain_sel), + veml6030_it_sel, ARRAY_SIZE(veml6030_it_sel), + &data->gts); + if (ret) + return dev_err_probe(dev, ret, "failed to init iio gts\n"); + ret = veml6030_als_shut_down(data); if (ret) return dev_err_probe(dev, ret, "can't shutdown als\n"); @@ -1092,11 +1045,6 @@ static int veml6035_hw_init(struct iio_dev *indio_dev, struct device *dev) return dev_err_probe(dev, ret, "can't clear als interrupt status\n");
- /* Cache currently active measurement parameters */ - data->cur_gain = 5; - data->cur_resolution = 1024; - data->cur_integration_time = 3; - return 0; }
@@ -1143,6 +1091,11 @@ static int veml6030_probe(struct i2c_client *client) if (ret < 0) return ret;
+ ret = veml6030_regfield_init(indio_dev); + if (ret) + return dev_err_probe(&client->dev, ret, + "failed to init regfields\n"); + ret = data->chip->hw_init(indio_dev, &client->dev); if (ret < 0) return ret; @@ -1187,38 +1140,35 @@ static DEFINE_RUNTIME_DEV_PM_OPS(veml6030_pm_ops, veml6030_runtime_suspend,
static const struct veml603x_chip veml6030_chip = { .name = "veml6030", - .scale_vals = &veml6030_scale_vals, - .num_scale_vals = ARRAY_SIZE(veml6030_scale_vals), .channels = veml6030_channels, .num_channels = ARRAY_SIZE(veml6030_channels), + .gain_rf = VEML6030_GAIN_RF, + .it_rf = VEML6030_IT_RF, + .max_scale = VEML6030_MAX_SCALE, .hw_init = veml6030_hw_init, .set_info = veml6030_set_info, - .set_als_gain = veml6030_set_als_gain, - .get_als_gain = veml6030_get_als_gain, };
static const struct veml603x_chip veml6035_chip = { .name = "veml6035", - .scale_vals = &veml6035_scale_vals, - .num_scale_vals = ARRAY_SIZE(veml6035_scale_vals), .channels = veml6030_channels, .num_channels = ARRAY_SIZE(veml6030_channels), + .gain_rf = VEML6035_GAIN_RF, + .it_rf = VEML6030_IT_RF, + .max_scale = VEML6035_MAX_SCALE, .hw_init = veml6035_hw_init, .set_info = veml6030_set_info, - .set_als_gain = veml6035_set_als_gain, - .get_als_gain = veml6035_get_als_gain, };
static const struct veml603x_chip veml7700_chip = { .name = "veml7700", - .scale_vals = &veml6030_scale_vals, - .num_scale_vals = ARRAY_SIZE(veml6030_scale_vals), .channels = veml7700_channels, .num_channels = ARRAY_SIZE(veml7700_channels), + .gain_rf = VEML6030_GAIN_RF, + .it_rf = VEML6030_IT_RF, + .max_scale = VEML6030_MAX_SCALE, .hw_init = veml6030_hw_init, .set_info = veml7700_set_info, - .set_als_gain = veml6030_set_als_gain, - .get_als_gain = veml6030_get_als_gain, };
static const struct of_device_id veml6030_of_match[] = { @@ -1260,3 +1210,4 @@ module_i2c_driver(veml6030_driver); MODULE_AUTHOR("Rishi Gupta gupt21@gmail.com"); MODULE_DESCRIPTION("VEML6030 Ambient Light Sensor"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS("IIO_GTS_HELPER"); diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c index 05d4c0e9015d..859891e8f115 100644 --- a/drivers/iio/light/veml6075.c +++ b/drivers/iio/light/veml6075.c @@ -195,13 +195,17 @@ static int veml6075_read_uv_direct(struct veml6075_data *data, int chan,
static int veml6075_read_int_time_index(struct veml6075_data *data) { - int ret, conf; + int ret, conf, int_index;
ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf); if (ret < 0) return ret;
- return FIELD_GET(VEML6075_CONF_IT, conf); + int_index = FIELD_GET(VEML6075_CONF_IT, conf); + if (int_index >= ARRAY_SIZE(veml6075_it_ms)) + return -EINVAL; + + return int_index; }
static int veml6075_read_int_time_ms(struct veml6075_data *data, int *val) diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 0ded91f056f3..ee75b99f84bc 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -528,6 +528,8 @@ static struct class ib_class = { static void rdma_init_coredev(struct ib_core_device *coredev, struct ib_device *dev, struct net *net) { + bool is_full_dev = &dev->coredev == coredev; + /* This BUILD_BUG_ON is intended to catch layout change * of union of ib_core_device and device. * dev must be the first element as ib_core and providers @@ -539,6 +541,13 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
coredev->dev.class = &ib_class; coredev->dev.groups = dev->groups; + + /* + * Don't expose hw counters outside of the init namespace. + */ + if (!is_full_dev && dev->hw_stats_attr_index) + coredev->dev.groups[dev->hw_stats_attr_index] = NULL; + device_initialize(&coredev->dev); coredev->owner = dev; INIT_LIST_HEAD(&coredev->port_list); @@ -1341,9 +1350,11 @@ static void ib_device_notify_register(struct ib_device *device) u32 port; int ret;
+ down_read(&devices_rwsem); + ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT); if (ret) - return; + goto out;
rdma_for_each_port(device, port) { netdev = ib_device_get_netdev(device, port); @@ -1354,8 +1365,11 @@ static void ib_device_notify_register(struct ib_device *device) RDMA_NETDEV_ATTACH_EVENT); dev_put(netdev); if (ret) - return; + goto out; } + +out: + up_read(&devices_rwsem); }
/** diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 1fd54d5c4dd8..73f3a0b9a54b 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2671,11 +2671,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad) { unsigned long flags; - int post, ret; struct ib_mad_private *mad_priv; struct ib_sge sg_list; struct ib_recv_wr recv_wr; struct ib_mad_queue *recv_queue = &qp_info->recv_queue; + int ret = 0;
/* Initialize common scatter list fields */ sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; @@ -2685,7 +2685,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1;
- do { + while (true) { /* Allocate and map receive buffer */ if (mad) { mad_priv = mad; @@ -2693,10 +2693,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, } else { mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), GFP_ATOMIC); - if (!mad_priv) { - ret = -ENOMEM; - break; - } + if (!mad_priv) + return -ENOMEM; } sg_list.length = mad_priv_dma_size(mad_priv); sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, @@ -2705,37 +2703,41 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { - kfree(mad_priv); ret = -ENOMEM; - break; + goto free_mad_priv; } mad_priv->header.mapping = sg_list.addr; mad_priv->header.mad_list.mad_queue = recv_queue; mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; - - /* Post receive WR */ spin_lock_irqsave(&recv_queue->lock, flags); - post = (++recv_queue->count < recv_queue->max_active); - list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); + if (recv_queue->count >= recv_queue->max_active) { + /* Fully populated the receive queue */ + spin_unlock_irqrestore(&recv_queue->lock, flags); + break; + } + recv_queue->count++; + list_add_tail(&mad_priv->header.mad_list.list, + &recv_queue->list); spin_unlock_irqrestore(&recv_queue->lock, flags); + ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); if (ret) { spin_lock_irqsave(&recv_queue->lock, flags); list_del(&mad_priv->header.mad_list.list); recv_queue->count--; spin_unlock_irqrestore(&recv_queue->lock, flags); - ib_dma_unmap_single(qp_info->port_priv->device, - mad_priv->header.mapping, - mad_priv_dma_size(mad_priv), - DMA_FROM_DEVICE); - kfree(mad_priv); dev_err(&qp_info->port_priv->device->dev, "ib_post_recv failed: %d\n", ret); break; } - } while (post); + }
+ ib_dma_unmap_single(qp_info->port_priv->device, + mad_priv->header.mapping, + mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); +free_mad_priv: + kfree(mad_priv); return ret; }
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 9f97bef02149..210092b9bf17 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -988,6 +988,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev) for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++) if (!ibdev->groups[i]) { ibdev->groups[i] = &data->group; + ibdev->hw_stats_attr_index = i; return 0; } WARN(true, "struct ib_device->groups is too small"); diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c index 1b23c698ec25..e0acc185e719 100644 --- a/drivers/infiniband/hw/erdma/erdma_cm.c +++ b/drivers/infiniband/hw/erdma/erdma_cm.c @@ -709,7 +709,6 @@ static void erdma_accept_newconn(struct erdma_cep *cep) erdma_cancel_mpatimer(new_cep);
erdma_cep_put(new_cep); - new_cep->sock = NULL; }
if (new_s) { diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index 457cea6d9909..f6bf289041bf 100644 --- a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -358,7 +358,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem unsigned int tail = 0; u64 *page_addr_list; void *request_buf; - int err; + int err = 0;
gc = mdev_to_gc(dev); hwc = gc->hwc.driver_data; diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 4c54dc578069..1aa5311b03e9 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -490,7 +490,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, }
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; - if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { + if (!*cur_qp || (qpn != (*cur_qp)->trans_qp.base.mqp.qpn)) { /* We do not have to take the QP table lock here, * because CQs will be locked while QPs are removed * from the table. diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 753faa9ad06a..068eac3bdb50 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -56,7 +56,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context); static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags, - unsigned int page_size, bool populate, + unsigned long page_size, bool populate, int access_mode); static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
@@ -919,6 +919,25 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, return ERR_PTR(ret); }
+static void mlx5r_destroy_cache_entries(struct mlx5_ib_dev *dev) +{ + struct rb_root *root = &dev->cache.rb_root; + struct mlx5_cache_ent *ent; + struct rb_node *node; + + mutex_lock(&dev->cache.rb_lock); + node = rb_first(root); + while (node) { + ent = rb_entry(node, struct mlx5_cache_ent, node); + node = rb_next(node); + clean_keys(dev, ent); + rb_erase(&ent->node, root); + mlx5r_mkeys_uninit(ent); + kfree(ent); + } + mutex_unlock(&dev->cache.rb_lock); +} + int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) { struct mlx5_mkey_cache *cache = &dev->cache; @@ -970,6 +989,8 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) err: mutex_unlock(&cache->rb_lock); mlx5_mkey_cache_debugfs_cleanup(dev); + mlx5r_destroy_cache_entries(dev); + destroy_workqueue(cache->wq); mlx5_ib_warn(dev, "failed to create mkey cache entry\n"); return ret; } @@ -1003,17 +1024,7 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
/* At this point all entries are disabled and have no concurrent work. */ - mutex_lock(&dev->cache.rb_lock); - node = rb_first(root); - while (node) { - ent = rb_entry(node, struct mlx5_cache_ent, node); - node = rb_next(node); - clean_keys(dev, ent); - rb_erase(&ent->node, root); - mlx5r_mkeys_uninit(ent); - kfree(ent); - } - mutex_unlock(&dev->cache.rb_lock); + mlx5r_destroy_cache_entries(dev);
destroy_workqueue(dev->cache.wq); del_timer_sync(&dev->delay_timer); @@ -1115,7 +1126,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, struct mlx5r_cache_rb_key rb_key = {}; struct mlx5_cache_ent *ent; struct mlx5_ib_mr *mr; - unsigned int page_size; + unsigned long page_size;
if (umem->is_dmabuf) page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova); @@ -1219,7 +1230,7 @@ reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_f */ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags, - unsigned int page_size, bool populate, + unsigned long page_size, bool populate, int access_mode) { struct mlx5_ib_dev *dev = to_mdev(pd->device); @@ -1425,7 +1436,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem, mr = alloc_cacheable_mr(pd, umem, iova, access_flags, MLX5_MKC_ACCESS_MODE_MTT); } else { - unsigned int page_size = + unsigned long page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
mutex_lock(&dev->slow_path_mutex); diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index e77c9280c07e..86d8fa63bf69 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -309,9 +309,6 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, blk_start_idx = idx; in_block = 1; } - - /* Count page invalidations */ - invalidations += idx - blk_start_idx + 1; } else { u64 umr_offset = idx & umr_block_mask;
@@ -321,14 +318,19 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); in_block = 0; + /* Count page invalidations */ + invalidations += idx - blk_start_idx + 1; } } } - if (in_block) + if (in_block) { mlx5r_umr_update_xlt(mr, blk_start_idx, idx - blk_start_idx + 1, 0, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); + /* Count page invalidations */ + invalidations += idx - blk_start_idx + 1; + }
mlx5_update_odp_stats_with_handled(mr, invalidations, invalidations);
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 68debf5ee2d7..e3bf27da1339 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -176,12 +176,11 @@ void amd_iommu_apply_ivrs_quirks(void); #else static inline void amd_iommu_apply_ivrs_quirks(void) { } #endif +struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
void amd_iommu_domain_set_pgtable(struct protection_domain *domain, u64 *root, int mode); struct dev_table_entry *get_dev_table(struct amd_iommu *iommu); - -#endif - -struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid); struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid); + +#endif /* AMD_IOMMU_H */ diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index bf1f0c814348..25d31f8c129a 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2871,16 +2871,19 @@ void intel_iommu_shutdown(void) if (no_iommu || dmar_disabled) return;
- down_write(&dmar_global_lock); + /* + * All other CPUs were brought down, hotplug interrupts were disabled, + * no lock and RCU checking needed anymore + */ + list_for_each_entry(drhd, &dmar_drhd_units, list) { + iommu = drhd->iommu;
- /* Disable PMRs explicitly here. */ - for_each_iommu(iommu, drhd) + /* Disable PMRs explicitly here. */ iommu_disable_protect_mem_regions(iommu);
- /* Make sure the IOMMUs are switched off */ - intel_disable_iommus(); - - up_write(&dmar_global_lock); + /* Make sure the IOMMUs are switched off */ + iommu_disable_translation(iommu); + } }
static struct intel_iommu *dev_to_intel_iommu(struct device *dev) diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c index c004640640ee..06aca9ab52f9 100644 --- a/drivers/iommu/io-pgtable-dart.c +++ b/drivers/iommu/io-pgtable-dart.c @@ -135,7 +135,6 @@ static int dart_init_pte(struct dart_io_pgtable *data, pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0); pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff);
- pte |= APPLE_DART1_PTE_PROT_SP_DIS; pte |= APPLE_DART_PTE_VALID;
for (i = 0; i < num_entries; i++) @@ -211,6 +210,7 @@ static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data, dart_iopte pte = 0;
if (data->iop.fmt == APPLE_DART) { + pte |= APPLE_DART1_PTE_PROT_SP_DIS; if (!(prot & IOMMU_WRITE)) pte |= APPLE_DART1_PTE_PROT_NO_WRITE; if (!(prot & IOMMU_READ)) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 60aed01e54f2..e3df1f06afbe 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -3097,6 +3097,11 @@ int iommu_device_use_default_domain(struct device *dev) return 0;
mutex_lock(&group->mutex); + /* We may race against bus_iommu_probe() finalising groups here */ + if (!group->default_domain) { + ret = -EPROBE_DEFER; + goto unlock_out; + } if (group->owner_cnt) { if (group->domain != group->default_domain || group->owner || !xa_empty(&group->pasid_array)) { diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index f6c46d2e5276..e3d8ddcff567 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -159,8 +159,19 @@ static void set_brightness_delayed(struct work_struct *ws) * before this work item runs once. To make sure this works properly * handle LED_SET_BRIGHTNESS_OFF first. */ - if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) + if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) { set_brightness_delayed_set_brightness(led_cdev, LED_OFF); + /* + * The consecutives led_set_brightness(LED_OFF), + * led_set_brightness(LED_FULL) could have been executed out of + * order (LED_FULL first), if the work_flags has been set + * between LED_SET_BRIGHTNESS_OFF and LED_SET_BRIGHTNESS of this + * work. To avoid ending with the LED turned off, turn the LED + * on again. + */ + if (led_cdev->delayed_set_value != LED_OFF) + set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); + }
if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags)) set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value); @@ -331,10 +342,13 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value) * change is done immediately afterwards (before the work runs), * it uses a separate work_flag. */ - if (value) { - led_cdev->delayed_set_value = value; + led_cdev->delayed_set_value = value; + /* Ensure delayed_set_value is seen before work_flags modification */ + smp_mb__before_atomic(); + + if (value) set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); - } else { + else { clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); clear_bit(LED_SET_BLINK, &led_cdev->work_flags); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); diff --git a/drivers/leds/leds-st1202.c b/drivers/leds/leds-st1202.c index e894b3f9a0f4..4cebc0203c22 100644 --- a/drivers/leds/leds-st1202.c +++ b/drivers/leds/leds-st1202.c @@ -345,7 +345,9 @@ static int st1202_probe(struct i2c_client *client) if (!chip) return -ENOMEM;
- devm_mutex_init(&client->dev, &chip->lock); + ret = devm_mutex_init(&client->dev, &chip->lock); + if (ret < 0) + return ret; chip->client = client;
ret = st1202_dt_init(chip); diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 23c09d22fcdb..9ae6cc8e30cb 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -426,8 +426,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, struct block_device *bdev; struct mddev *mddev = bitmap->mddev; struct bitmap_storage *store = &bitmap->storage; - unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) << - PAGE_SHIFT; + unsigned long num_pages = bitmap->storage.file_pages; + unsigned int bitmap_limit = (num_pages - pg_index % num_pages) << PAGE_SHIFT; loff_t sboff, offset = mddev->bitmap_info.offset; sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE; unsigned int size = PAGE_SIZE; @@ -436,7 +436,7 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; /* we compare length (page numbers), not page offset. */ - if ((pg_index - store->sb_index) == store->file_pages - 1) { + if ((pg_index - store->sb_index) == num_pages - 1) { unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
if (last_page_size == 0) diff --git a/drivers/md/md.c b/drivers/md/md.c index 30b3dbbce2d2..ef859ccb0366 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -629,6 +629,12 @@ static void __mddev_put(struct mddev *mddev) queue_work(md_misc_wq, &mddev->del_work); }
+static void mddev_put_locked(struct mddev *mddev) +{ + if (atomic_dec_and_test(&mddev->active)) + __mddev_put(mddev); +} + void mddev_put(struct mddev *mddev) { if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) @@ -1748,7 +1754,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ count <<= sb->bblog_shift; if (bb + 1 == 0) break; - if (badblocks_set(&rdev->badblocks, sector, count, 1)) + if (!badblocks_set(&rdev->badblocks, sector, count, 1)) return -EINVAL; } } else if (sb->bblog_offset != 0) @@ -8461,9 +8467,7 @@ static int md_seq_show(struct seq_file *seq, void *v) if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs)) status_unused(seq);
- if (atomic_dec_and_test(&mddev->active)) - __mddev_put(mddev); - + mddev_put_locked(mddev); return 0; }
@@ -9460,6 +9464,13 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares) return true; }
+ /* Check if resync is in progress. */ + if (mddev->recovery_cp < MaxSector) { + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); + return true; + } + /* * Remove any failed drives, then add spares if possible. Spares are * also removed and re-added, to allow the personality to fail the @@ -9476,13 +9487,6 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares) return true; }
- /* Check if recovery is in progress. */ - if (mddev->recovery_cp < MaxSector) { - set_bit(MD_RECOVERY_SYNC, &mddev->recovery); - clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); - return true; - } - /* Delay to choose resync/check/repair in md_do_sync(). */ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) return true; @@ -9846,7 +9850,6 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new) { struct mddev *mddev = rdev->mddev; - int rv;
/* * Recording new badblocks for faulty rdev will force unnecessary @@ -9862,44 +9865,46 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, s += rdev->new_data_offset; else s += rdev->data_offset; - rv = badblocks_set(&rdev->badblocks, s, sectors, 0); - if (rv == 0) { - /* Make sure they get written out promptly */ - if (test_bit(ExternalBbl, &rdev->flags)) - sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks); - sysfs_notify_dirent_safe(rdev->sysfs_state); - set_mask_bits(&mddev->sb_flags, 0, - BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); - md_wakeup_thread(rdev->mddev->thread); - return 1; - } else + + if (!badblocks_set(&rdev->badblocks, s, sectors, 0)) return 0; + + /* Make sure they get written out promptly */ + if (test_bit(ExternalBbl, &rdev->flags)) + sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks); + sysfs_notify_dirent_safe(rdev->sysfs_state); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); + md_wakeup_thread(rdev->mddev->thread); + return 1; } EXPORT_SYMBOL_GPL(rdev_set_badblocks);
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new) { - int rv; if (is_new) s += rdev->new_data_offset; else s += rdev->data_offset; - rv = badblocks_clear(&rdev->badblocks, s, sectors); - if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags)) + + if (!badblocks_clear(&rdev->badblocks, s, sectors)) + return 0; + + if (test_bit(ExternalBbl, &rdev->flags)) sysfs_notify_dirent_safe(rdev->sysfs_badblocks); - return rv; + return 1; } EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
static int md_notify_reboot(struct notifier_block *this, unsigned long code, void *x) { - struct mddev *mddev, *n; + struct mddev *mddev; int need_delay = 0;
spin_lock(&all_mddevs_lock); - list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { + list_for_each_entry(mddev, &all_mddevs, all_mddevs) { if (!mddev_get(mddev)) continue; spin_unlock(&all_mddevs_lock); @@ -9911,8 +9916,8 @@ static int md_notify_reboot(struct notifier_block *this, mddev_unlock(mddev); } need_delay = 1; - mddev_put(mddev); spin_lock(&all_mddevs_lock); + mddev_put_locked(mddev); } spin_unlock(&all_mddevs_lock);
@@ -10245,7 +10250,7 @@ void md_autostart_arrays(int part)
static __exit void md_exit(void) { - struct mddev *mddev, *n; + struct mddev *mddev; int delay = 1;
unregister_blkdev(MD_MAJOR,"md"); @@ -10266,7 +10271,7 @@ static __exit void md_exit(void) remove_proc_entry("mdstat", NULL);
spin_lock(&all_mddevs_lock); - list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { + list_for_each_entry(mddev, &all_mddevs, all_mddevs) { if (!mddev_get(mddev)) continue; spin_unlock(&all_mddevs_lock); @@ -10278,8 +10283,8 @@ static __exit void md_exit(void) * the mddev for destruction by a workqueue, and the * destroy_workqueue() below will wait for that to complete. */ - mddev_put(mddev); spin_lock(&all_mddevs_lock); + mddev_put_locked(mddev); } spin_unlock(&all_mddevs_lock);
diff --git a/drivers/md/md.h b/drivers/md/md.h index def808064ad8..cc31c795369d 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -266,8 +266,8 @@ enum flag_bits { Nonrot, /* non-rotational device (SSD) */ };
-static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, - sector_t *first_bad, int *bad_sectors) +static inline int is_badblock(struct md_rdev *rdev, sector_t s, sector_t sectors, + sector_t *first_bad, sector_t *bad_sectors) { if (unlikely(rdev->badblocks.count)) { int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s, @@ -284,7 +284,7 @@ static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s, int sectors) { sector_t first_bad; - int bad_sectors; + sector_t bad_sectors;
return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors); } diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 4378d3250bd7..62b980b12f93 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -247,7 +247,7 @@ static inline int raid1_check_read_range(struct md_rdev *rdev, sector_t this_sector, int *len) { sector_t first_bad; - int bad_sectors; + sector_t bad_sectors;
/* no bad block overlap */ if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors)) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 10ea3af40991..15829ab192d2 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -45,6 +45,7 @@
static void allow_barrier(struct r1conf *conf, sector_t sector_nr); static void lower_barrier(struct r1conf *conf, sector_t sector_nr); +static void raid1_free(struct mddev *mddev, void *priv);
#define RAID_1_10_NAME "raid1" #include "raid1-10.c" @@ -1315,8 +1316,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, struct r1conf *conf = mddev->private; struct raid1_info *mirror; struct bio *read_bio; - const enum req_op op = bio_op(bio); - const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC; int max_sectors; int rdisk, error; bool r1bio_existed = !!r1_bio; @@ -1404,7 +1403,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, read_bio->bi_iter.bi_sector = r1_bio->sector + mirror->rdev->data_offset; read_bio->bi_end_io = raid1_end_read_request; - read_bio->bi_opf = op | do_sync; if (test_bit(FailFast, &mirror->rdev->flags) && test_bit(R1BIO_FailFast, &r1_bio->state)) read_bio->bi_opf |= MD_FAILFAST; @@ -1537,7 +1535,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, atomic_inc(&rdev->nr_pending); if (test_bit(WriteErrorSeen, &rdev->flags)) { sector_t first_bad; - int bad_sectors; + sector_t bad_sectors; int is_bad;
is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, @@ -1653,8 +1651,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset); mbio->bi_end_io = raid1_end_write_request; - mbio->bi_opf = bio_op(bio) | - (bio->bi_opf & (REQ_SYNC | REQ_FUA | REQ_ATOMIC)); if (test_bit(FailFast, &rdev->flags) && !test_bit(WriteMostly, &rdev->flags) && conf->raid_disks - mddev->degraded > 1) @@ -2886,7 +2882,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, } else { /* may need to read from here */ sector_t first_bad = MaxSector; - int bad_sectors; + sector_t bad_sectors;
if (is_badblock(rdev, sector_nr, good_sectors, &first_bad, &bad_sectors)) { @@ -3256,8 +3252,11 @@ static int raid1_run(struct mddev *mddev)
if (!mddev_is_dm(mddev)) { ret = raid1_set_limits(mddev); - if (ret) + if (ret) { + if (!mddev->private) + raid1_free(mddev, conf); return ret; + } }
mddev->degraded = 0; @@ -3271,6 +3270,8 @@ static int raid1_run(struct mddev *mddev) */ if (conf->raid_disks - mddev->degraded < 1) { md_unregister_thread(mddev, &conf->thread); + if (!mddev->private) + raid1_free(mddev, conf); return -EINVAL; }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 15b9ae5bf84d..af010b64be63 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -747,7 +747,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
for (slot = 0; slot < conf->copies ; slot++) { sector_t first_bad; - int bad_sectors; + sector_t bad_sectors; sector_t dev_sector; unsigned int pending; bool nonrot; @@ -1146,8 +1146,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, { struct r10conf *conf = mddev->private; struct bio *read_bio; - const enum req_op op = bio_op(bio); - const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC; int max_sectors; struct md_rdev *rdev; char b[BDEVNAME_SIZE]; @@ -1228,7 +1226,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); read_bio->bi_end_io = raid10_end_read_request; - read_bio->bi_opf = op | do_sync; if (test_bit(FailFast, &rdev->flags) && test_bit(R10BIO_FailFast, &r10_bio->state)) read_bio->bi_opf |= MD_FAILFAST; @@ -1247,10 +1244,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, struct bio *bio, bool replacement, int n_copy) { - const enum req_op op = bio_op(bio); - const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC; - const blk_opf_t do_fua = bio->bi_opf & REQ_FUA; - const blk_opf_t do_atomic = bio->bi_opf & REQ_ATOMIC; unsigned long flags; struct r10conf *conf = mddev->private; struct md_rdev *rdev; @@ -1269,7 +1262,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + choose_data_offset(r10_bio, rdev)); mbio->bi_end_io = raid10_end_write_request; - mbio->bi_opf = op | do_sync | do_fua | do_atomic; if (!replacement && test_bit(FailFast, &conf->mirrors[devnum].rdev->flags) && enough(conf, devnum)) @@ -1438,7 +1430,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { sector_t first_bad; sector_t dev_sector = r10_bio->devs[i].addr; - int bad_sectors; + sector_t bad_sectors; int is_bad;
is_bad = is_badblock(rdev, dev_sector, max_sectors, @@ -1631,11 +1623,10 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio) if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) return -EAGAIN;
- if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) { + if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) { bio_wouldblock_error(bio); return 0; } - wait_barrier(conf, false);
/* * Check reshape again to avoid reshape happens after checking @@ -3413,7 +3404,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t from_addr, to_addr; struct md_rdev *rdev = conf->mirrors[d].rdev; sector_t sector, first_bad; - int bad_sectors; + sector_t bad_sectors; if (!rdev || !test_bit(In_sync, &rdev->flags)) continue; @@ -3609,7 +3600,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, for (i = 0; i < conf->copies; i++) { int d = r10_bio->devs[i].devnum; sector_t first_bad, sector; - int bad_sectors; + sector_t bad_sectors; struct md_rdev *rdev;
if (r10_bio->devs[i].repl_bio) diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 2f5165918163..cfe59c3255f7 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -2701,8 +2701,11 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz) u8 ratio;
if (state->revision == 0x8090) { + u32 internal = dib8000_read32(state, 23) / 1000; + ratio = 4; - unit_khz_dds_val = (1<<26) / (dib8000_read32(state, 23) / 1000); + + unit_khz_dds_val = (1<<26) / (internal ?: 1); if (offset_khz < 0) dds = (1 << 26) - (abs_offset_khz * unit_khz_dds_val); else diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c index e491399afcc9..eb03df0d8652 100644 --- a/drivers/media/platform/allegro-dvt/allegro-core.c +++ b/drivers/media/platform/allegro-dvt/allegro-core.c @@ -3912,6 +3912,7 @@ static int allegro_probe(struct platform_device *pdev) if (ret < 0) { v4l2_err(&dev->v4l2_dev, "failed to request firmware: %d\n", ret); + v4l2_device_unregister(&dev->v4l2_dev); return ret; }
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c index 405ca215179d..a7fd808aea1e 100644 --- a/drivers/media/platform/ti/omap3isp/isp.c +++ b/drivers/media/platform/ti/omap3isp/isp.c @@ -1961,6 +1961,13 @@ static int isp_attach_iommu(struct isp_device *isp) struct dma_iommu_mapping *mapping; int ret;
+ /* We always want to replace any default mapping from the arch code */ + mapping = to_dma_iommu_mapping(isp->dev); + if (mapping) { + arm_iommu_detach_device(isp->dev); + arm_iommu_release_mapping(mapping); + } + /* * Create the ARM mapping, used by the ARM DMA mapping core to allocate * VAs. This will allocate a corresponding IOMMU domain. diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c index 85a44143b378..0e212198dd65 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c @@ -518,6 +518,7 @@ static void set_buffers(struct hantro_ctx *ctx) hantro_reg_write(vpu, &g2_stream_len, src_len); hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len); hantro_reg_write(vpu, &g2_strm_start_offset, 0); + hantro_reg_write(vpu, &g2_start_bit, 0); hantro_reg_write(vpu, &g2_write_mvs_e, 1);
hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma); diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c index 9b209e687f25..2ce62fe5d60f 100644 --- a/drivers/media/rc/streamzap.c +++ b/drivers/media/rc/streamzap.c @@ -385,8 +385,8 @@ static void streamzap_disconnect(struct usb_interface *interface) if (!sz) return;
- rc_unregister_device(sz->rdev); usb_kill_urb(sz->urb_in); + rc_unregister_device(sz->rdev); usb_free_urb(sz->urb_in); usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in);
diff --git a/drivers/media/test-drivers/vimc/vimc-streamer.c b/drivers/media/test-drivers/vimc/vimc-streamer.c index 807551a5143b..15d863f97cbf 100644 --- a/drivers/media/test-drivers/vimc/vimc-streamer.c +++ b/drivers/media/test-drivers/vimc/vimc-streamer.c @@ -59,6 +59,12 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream) continue;
sd = media_entity_to_v4l2_subdev(ved->ent); + /* + * Do not call .s_stream() to stop an already + * stopped/unstarted subdev. + */ + if (!v4l2_subdev_is_streaming(sd)) + continue; v4l2_subdev_call(sd, video, s_stream, 0); } } diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 5710348f72f6..a8f5467d6b31 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -332,6 +332,38 @@ static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = { [25] = {0x01}, };
+static const u8 mtk_smi_larb_mt8192_ostd[][SMI_LARB_PORT_NR_MAX] = { + [0] = {0x2, 0x2, 0x28, 0xa, 0xc, 0x28,}, + [1] = {0x2, 0x2, 0x18, 0x18, 0x18, 0xa, 0xc, 0x28,}, + [2] = {0x5, 0x5, 0x5, 0x5, 0x1,}, + [3] = {}, + [4] = {0x28, 0x19, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x1,}, + [5] = {0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x16,}, + [6] = {}, + [7] = {0x1, 0x3, 0x2, 0x1, 0x1, 0x5, 0x2, 0x12, 0x13, 0x4, 0x4, 0x1, + 0x4, 0x2, 0x1,}, + [8] = {}, + [9] = {0xa, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4, + 0xa, 0x3, 0x4, 0xe, 0x1, 0x7, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, + 0x1, 0x1, 0x1, 0x1, 0x1,}, + [10] = {}, + [11] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, + 0x1, 0x1, 0x1, 0xe, 0x1, 0x7, 0x8, 0x7, 0x7, 0x1, 0x6, 0x2, + 0xf, 0x8, 0x1, 0x1, 0x1,}, + [12] = {}, + [13] = {0x2, 0xc, 0xc, 0xe, 0x6, 0x6, 0x6, 0x6, 0x6, 0x12, 0x6, 0x28, + 0x2, 0xc, 0xc, 0x28, 0x12, 0x6,}, + [14] = {}, + [15] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2, + 0x4, 0x2, 0x8, 0x4, 0x4,}, + [16] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2, + 0x4, 0x2, 0x8, 0x4, 0x4,}, + [17] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2, + 0x4, 0x2, 0x8, 0x4, 0x4,}, + [18] = {0x2, 0x2, 0x4, 0x2,}, + [19] = {0x9, 0x9, 0x5, 0x5, 0x1, 0x1,}, +}; + static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = { [0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */ [1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */ @@ -427,6 +459,7 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8188 = {
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = { .config_port = mtk_smi_larb_config_port_gen2_general, + .ostd = mtk_smi_larb_mt8192_ostd, };
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = { diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 0469e85d72cf..7ee293b09f62 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -920,7 +920,7 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); void __iomem *regs = smchip->regbase; unsigned long save; unsigned long val; @@ -946,7 +946,7 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset) struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; void __iomem *regs = smchip->regbase; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); unsigned long save; unsigned long ddr;
@@ -971,7 +971,7 @@ static int sm501_gpio_output(struct gpio_chip *chip, { struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); void __iomem *regs = smchip->regbase; unsigned long save; unsigned long val; diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index d5ac71a49386..9dac7cbe8748 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -272,9 +272,9 @@ static const u32 bar_test_pattern[] = { };
static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test, - enum pci_barno barno, int offset, - void *write_buf, void *read_buf, - int size) + enum pci_barno barno, + resource_size_t offset, void *write_buf, + void *read_buf, int size) { memset(write_buf, bar_test_pattern[barno], size); memcpy_toio(test->bar[barno] + offset, write_buf, size); @@ -287,10 +287,11 @@ static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test, static int pci_endpoint_test_bar(struct pci_endpoint_test *test, enum pci_barno barno) { - int j, bar_size, buf_size, iters; + resource_size_t bar_size, offset = 0; void *write_buf __free(kfree) = NULL; void *read_buf __free(kfree) = NULL; struct pci_dev *pdev = test->pdev; + int buf_size;
if (!test->bar[barno]) return -ENOMEM; @@ -314,11 +315,12 @@ static int pci_endpoint_test_bar(struct pci_endpoint_test *test, if (!read_buf) return -ENOMEM;
- iters = bar_size / buf_size; - for (j = 0; j < iters; j++) - if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j, - write_buf, read_buf, buf_size)) + while (offset < bar_size) { + if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf, + read_buf, buf_size)) return -EIO; + offset += buf_size; + }
return 0; } @@ -382,7 +384,7 @@ static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test, static int pci_endpoint_test_bars(struct pci_endpoint_test *test) { enum pci_barno bar; - bool ret; + int ret;
/* Write all BARs in order (without reading). */ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) @@ -398,7 +400,7 @@ static int pci_endpoint_test_bars(struct pci_endpoint_test *test) for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { if (test->bar[bar]) { ret = pci_endpoint_test_bars_read_bar(test, bar); - if (!ret) + if (ret) return ret; } } diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 62252ad4e20d..3cdb2fc44965 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1272,19 +1272,25 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) /* Check for some optional GPIO controls */ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd", id, GPIOD_OUT_LOW); - if (IS_ERR(slot->vsd)) - return dev_err_probe(host->dev, PTR_ERR(slot->vsd), + if (IS_ERR(slot->vsd)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->vsd), "error looking up VSD GPIO\n"); + goto err_free_host; + } slot->vio = devm_gpiod_get_index_optional(host->dev, "vio", id, GPIOD_OUT_LOW); - if (IS_ERR(slot->vio)) - return dev_err_probe(host->dev, PTR_ERR(slot->vio), + if (IS_ERR(slot->vio)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->vio), "error looking up VIO GPIO\n"); + goto err_free_host; + } slot->cover = devm_gpiod_get_index_optional(host->dev, "cover", id, GPIOD_IN); - if (IS_ERR(slot->cover)) - return dev_err_probe(host->dev, PTR_ERR(slot->cover), + if (IS_ERR(slot->cover)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->cover), "error looking up cover switch GPIO\n"); + goto err_free_host; + }
host->slots[id] = slot;
@@ -1344,6 +1350,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) device_remove_file(&mmc->class_dev, &dev_attr_slot_name); err_remove_host: mmc_remove_host(mmc); +err_free_host: mmc_free_host(mmc); return r; } diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 54d795205fb4..26a9a8b5682a 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1339,8 +1339,8 @@ static int sdhci_omap_probe(struct platform_device *pdev) /* R1B responses is required to properly manage HW busy detection. */ mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
- /* Allow card power off and runtime PM for eMMC/SD card devices */ - mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM; + /* Enable SDIO card power off. */ + mmc->caps |= MMC_CAP_POWER_OFF_CARD;
ret = sdhci_setup_host(host); if (ret) diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 990723a008ae..3fb56face3d8 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -399,6 +399,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (!IS_ERR(pxa->clk_core)) clk_prepare_enable(pxa->clk_core);
+ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY; /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index c5e571ec94c9..0472bcdff130 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -251,18 +251,33 @@ static int com20020pci_probe(struct pci_dev *pdev, card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, GFP_KERNEL, "arc%d-%d-tx", dev->dev_id, i); + if (!card->tx_led.default_trigger) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pci:green:tx:%d-%d", dev->dev_id, i); - + if (!card->tx_led.name) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->tx_led.dev = &dev->dev; card->recon_led.brightness_set = led_recon_set; card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, GFP_KERNEL, "arc%d-%d-recon", dev->dev_id, i); + if (!card->recon_led.default_trigger) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pci:red:recon:%d-%d", dev->dev_id, i); + if (!card->recon_led.name) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->recon_led.dev = &dev->dev;
ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e45bba240cbc..4da5fcb7def4 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -322,9 +322,9 @@ static bool bond_sk_check(struct bonding *bond) } }
-static bool bond_xdp_check(struct bonding *bond) +bool bond_xdp_check(struct bonding *bond, int mode) { - switch (BOND_MODE(bond)) { + switch (mode) { case BOND_MODE_ROUNDROBIN: case BOND_MODE_ACTIVEBACKUP: return true; @@ -1937,7 +1937,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) { + if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) { xdp_clear_features_flag(bond_dev); return; } @@ -5699,7 +5699,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
ASSERT_RTNL();
- if (!bond_xdp_check(bond)) { + if (!bond_xdp_check(bond, BOND_MODE(bond))) { BOND_NL_ERR(dev, extack, "No native XDP support for the current bonding mode"); return -EOPNOTSUPP; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index d1b095af253b..91893c29b899 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -868,6 +868,9 @@ static bool bond_set_xfrm_features(struct bonding *bond) static int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) { + if (bond->xdp_prog && !bond_xdp_check(bond, newval->value)) + return -EOPNOTSUPP; + if (!bond_mode_uses_arp(newval->value)) { if (bond->params.arp_interval) { netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c index d9a937ba126c..46201c126703 100644 --- a/drivers/net/can/rockchip/rockchip_canfd-core.c +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -236,11 +236,6 @@ static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv) { u32 reg;
- /* TXE FIFO */ - reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); - reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE; - rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg); - /* RX FIFO */ reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE; diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c index da7110d67558..be433b4e2b1c 100644 --- a/drivers/net/dsa/microchip/ksz8.c +++ b/drivers/net/dsa/microchip/ksz8.c @@ -1625,7 +1625,6 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) const u16 *regs = dev->info->regs; struct dsa_switch *ds = dev->ds; const u32 *masks; - int queues; u8 member;
masks = dev->info->masks; @@ -1633,15 +1632,7 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) /* enable broadcast storm limit */ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- /* For KSZ88x3 enable only one queue by default, otherwise we won't - * be able to get rid of PCP prios on Port 2. - */ - if (ksz_is_ksz88x3(dev)) - queues = 1; - else - queues = dev->info->num_tx_queues; - - ksz8_port_queue_split(dev, port, queues); + ksz8_port_queue_split(dev, port, dev->info->num_tx_queues);
/* replace priority */ ksz_port_cfg(dev, port, P_802_1P_CTRL, diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c index 30b4a6186e38..c3b501997ac9 100644 --- a/drivers/net/dsa/microchip/ksz_dcb.c +++ b/drivers/net/dsa/microchip/ksz_dcb.c @@ -10,7 +10,12 @@ #include "ksz_dcb.h" #include "ksz8.h"
-#define KSZ8_REG_PORT_1_CTRL_0 0x10 +/* Port X Control 0 register. + * The datasheet specifies: Port 1 - 0x10, Port 2 - 0x20, Port 3 - 0x30. + * However, the driver uses get_port_addr(), which maps Port 1 to offset 0. + * Therefore, we define the base offset as 0x00 here to align with that logic. + */ +#define KSZ8_REG_PORT_1_CTRL_0 0x00 #define KSZ8_PORT_DIFFSERV_ENABLE BIT(6) #define KSZ8_PORT_802_1P_ENABLE BIT(5) #define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3) @@ -181,49 +186,6 @@ int ksz_port_get_default_prio(struct dsa_switch *ds, int port) return (data & mask) >> shift; }
-/** - * ksz88x3_port_set_default_prio_quirks - Quirks for default priority - * @dev: Pointer to the KSZ switch device structure - * @port: Port number for which to set the default priority - * @prio: Priority value to set - * - * This function implements quirks for setting the default priority on KSZ88x3 - * devices. On Port 2, no other priority providers are working - * except of PCP. So, configuring default priority on Port 2 is not possible. - * On Port 1, it is not possible to configure port priority if PCP - * apptrust on Port 2 is disabled. Since we disable multiple queues on the - * switch to disable PCP on Port 2, we need to ensure that the default priority - * configuration on Port 1 is in agreement with the configuration on Port 2. - * - * Return: 0 on success, or a negative error code on failure - */ -static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port, - u8 prio) -{ - if (!prio) - return 0; - - if (port == KSZ_PORT_2) { - dev_err(dev->dev, "Port priority configuration is not working on Port 2\n"); - return -EINVAL; - } else if (port == KSZ_PORT_1) { - u8 port2_data; - int ret; - - ret = ksz_pread8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0, - &port2_data); - if (ret) - return ret; - - if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) { - dev_err(dev->dev, "Not possible to configure port priority on Port 1 if PCP apptrust on Port 2 is disabled\n"); - return -EINVAL; - } - } - - return 0; -} - /** * ksz_port_set_default_prio - Sets the default priority for a port on a KSZ * switch @@ -239,18 +201,12 @@ static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio) { struct ksz_device *dev = ds->priv; - int reg, shift, ret; + int reg, shift; u8 mask;
if (prio >= dev->info->num_ipms) return -EINVAL;
- if (ksz_is_ksz88x3(dev)) { - ret = ksz88x3_port_set_default_prio_quirks(dev, port, prio); - if (ret) - return ret; - } - ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask); @@ -518,155 +474,6 @@ static int ksz_port_set_apptrust_validate(struct ksz_device *dev, int port, return -EINVAL; }
-/** - * ksz88x3_port1_apptrust_quirk - Quirk for apptrust configuration on Port 1 - * of KSZ88x3 devices - * @dev: Pointer to the KSZ switch device structure - * @port: Port number for which to set the apptrust selectors - * @reg: Register address for the apptrust configuration - * @port1_data: Data to set for the apptrust configuration - * - * This function implements a quirk for apptrust configuration on Port 1 of - * KSZ88x3 devices. It ensures that apptrust configuration on Port 1 is not - * possible if PCP apptrust on Port 2 is disabled. This is because the Port 2 - * seems to be permanently hardwired to PCP classification, so we need to - * do Port 1 configuration always in agreement with Port 2 configuration. - * - * Return: 0 on success, or a negative error code on failure - */ -static int ksz88x3_port1_apptrust_quirk(struct ksz_device *dev, int port, - int reg, u8 port1_data) -{ - u8 port2_data; - int ret; - - /* If no apptrust is requested for Port 1, no need to care about Port 2 - * configuration. - */ - if (!(port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE))) - return 0; - - /* We got request to enable any apptrust on Port 1. To make it possible, - * we need to enable multiple queues on the switch. If we enable - * multiqueue support, PCP classification on Port 2 will be - * automatically activated by HW. - */ - ret = ksz_pread8(dev, KSZ_PORT_2, reg, &port2_data); - if (ret) - return ret; - - /* If KSZ8_PORT_802_1P_ENABLE bit is set on Port 2, the driver showed - * the interest in PCP classification on Port 2. In this case, - * multiqueue support is enabled and we can enable any apptrust on - * Port 1. - * If KSZ8_PORT_802_1P_ENABLE bit is not set on Port 2, the PCP - * classification on Port 2 is still active, but the driver disabled - * multiqueue support and made frame prioritization inactive for - * all ports. In this case, we can't enable any apptrust on Port 1. - */ - if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) { - dev_err(dev->dev, "Not possible to enable any apptrust on Port 1 if PCP apptrust on Port 2 is disabled\n"); - return -EINVAL; - } - - return 0; -} - -/** - * ksz88x3_port2_apptrust_quirk - Quirk for apptrust configuration on Port 2 - * of KSZ88x3 devices - * @dev: Pointer to the KSZ switch device structure - * @port: Port number for which to set the apptrust selectors - * @reg: Register address for the apptrust configuration - * @port2_data: Data to set for the apptrust configuration - * - * This function implements a quirk for apptrust configuration on Port 2 of - * KSZ88x3 devices. It ensures that DSCP apptrust is not working on Port 2 and - * that it is not possible to disable PCP on Port 2. The only way to disable PCP - * on Port 2 is to disable multiple queues on the switch. - * - * Return: 0 on success, or a negative error code on failure - */ -static int ksz88x3_port2_apptrust_quirk(struct ksz_device *dev, int port, - int reg, u8 port2_data) -{ - struct dsa_switch *ds = dev->ds; - u8 port1_data; - int ret; - - /* First validate Port 2 configuration. DiffServ/DSCP is not working - * on this port. - */ - if (port2_data & KSZ8_PORT_DIFFSERV_ENABLE) { - dev_err(dev->dev, "DSCP apptrust is not working on Port 2\n"); - return -EINVAL; - } - - /* If PCP support is requested, we need to enable all queues on the - * switch to make PCP priority working on Port 2. - */ - if (port2_data & KSZ8_PORT_802_1P_ENABLE) - return ksz8_all_queues_split(dev, dev->info->num_tx_queues); - - /* We got request to disable PCP priority on Port 2. 
- * Now, we need to compare Port 2 configuration with Port 1 - * configuration. - */ - ret = ksz_pread8(dev, KSZ_PORT_1, reg, &port1_data); - if (ret) - return ret; - - /* If Port 1 has any apptrust enabled, we can't disable multiple queues - * on the switch, so we can't disable PCP on Port 2. - */ - if (port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)) { - dev_err(dev->dev, "Not possible to disable PCP on Port 2 if any apptrust is enabled on Port 1\n"); - return -EINVAL; - } - - /* Now we need to ensure that default priority on Port 1 is set to 0 - * otherwise we can't disable multiqueue support on the switch. - */ - ret = ksz_port_get_default_prio(ds, KSZ_PORT_1); - if (ret < 0) { - return ret; - } else if (ret) { - dev_err(dev->dev, "Not possible to disable PCP on Port 2 if non zero default priority is set on Port 1\n"); - return -EINVAL; - } - - /* Port 1 has no apptrust or default priority set and we got request to - * disable PCP on Port 2. We can disable multiqueue support to disable - * PCP on Port 2. - */ - return ksz8_all_queues_split(dev, 1); -} - -/** - * ksz88x3_port_apptrust_quirk - Quirk for apptrust configuration on KSZ88x3 - * devices - * @dev: Pointer to the KSZ switch device structure - * @port: Port number for which to set the apptrust selectors - * @reg: Register address for the apptrust configuration - * @data: Data to set for the apptrust configuration - * - * This function implements a quirk for apptrust configuration on KSZ88x3 - * devices. It ensures that apptrust configuration on Port 1 and - * Port 2 is done in agreement with each other. - * - * Return: 0 on success, or a negative error code on failure - */ -static int ksz88x3_port_apptrust_quirk(struct ksz_device *dev, int port, - int reg, u8 data) -{ - if (port == KSZ_PORT_1) - return ksz88x3_port1_apptrust_quirk(dev, port, reg, data); - else if (port == KSZ_PORT_2) - return ksz88x3_port2_apptrust_quirk(dev, port, reg, data); - - return 0; -} - /** * ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ * switch @@ -707,12 +514,6 @@ int ksz_port_set_apptrust(struct dsa_switch *ds, int port, } }
- if (ksz_is_ksz88x3(dev)) { - ret = ksz88x3_port_apptrust_quirk(dev, port, reg, data); - if (ret) - return ret; - } - return ksz_prmw8(dev, port, reg, mask, data); }
@@ -799,21 +600,5 @@ int ksz_dcb_init_port(struct ksz_device *dev, int port) */ int ksz_dcb_init(struct ksz_device *dev) { - int ret; - - ret = ksz_init_global_dscp_map(dev); - if (ret) - return ret; - - /* Enable 802.1p priority control on Port 2 during switch initialization. - * This setup is critical for the apptrust functionality on Port 1, which - * relies on the priority settings of Port 2. Note: Port 1 is naturally - * configured before Port 2, necessitating this configuration order. - */ - if (ksz_is_ksz88x3(dev)) - return ksz_prmw8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0, - KSZ8_PORT_802_1P_ENABLE, - KSZ8_PORT_802_1P_ENABLE); - - return 0; + return ksz_init_global_dscp_map(dev); } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5db96ca52505..4a9fbfa8db41 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -5145,6 +5145,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -5169,8 +5170,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, - .vtu_getnext = mv88e6185_g1_vtu_getnext, - .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .stu_getnext = mv88e6352_g1_stu_getnext, + .stu_loadpurge = mv88e6352_g1_stu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -5194,6 +5197,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -5217,8 +5221,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, - .vtu_getnext = mv88e6185_g1_vtu_getnext, - .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .stu_getnext = mv88e6352_g1_stu_getnext, + .stu_loadpurge = mv88e6352_g1_stu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -5818,7 +5824,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, - .atu_move_port_mask = 0x1f, + .atu_move_port_mask = 0xf, .g1_irqs = 9, .g2_irqs = 10, .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, @@ -6239,6 +6245,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .num_gpio = 15, .max_vid = 4095, + .max_sid = 63, .port_base_addr = 0x10, .phy_base_addr = 0x0, .global1_addr = 0x1b, @@ -6265,6 +6272,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_internal_phys = 5, .num_gpio = 15, 
.max_vid = 4095, + .max_sid = 63, .port_base_addr = 0x10, .phy_base_addr = 0x0, .global1_addr = 0x1b, @@ -6274,6 +6282,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0xf, + .pvt = true, .multi_chip = true, .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, @@ -6296,7 +6305,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, - .atu_move_port_mask = 0x1f, + .atu_move_port_mask = 0xf, .g1_irqs = 9, .g2_irqs = 10, .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, @@ -7322,13 +7331,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) err = mv88e6xxx_switch_reset(chip); mv88e6xxx_reg_unlock(chip); if (err) - goto out; + goto out_phy;
if (np) { chip->irq = of_irq_get(np, 0); if (chip->irq == -EPROBE_DEFER) { err = chip->irq; - goto out; + goto out_phy; } }
@@ -7347,7 +7356,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) mv88e6xxx_reg_unlock(chip);
if (err) - goto out; + goto out_phy;
if (chip->info->g2_irqs > 0) { err = mv88e6xxx_g2_irq_setup(chip); @@ -7381,6 +7390,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); +out_phy: + mv88e6xxx_phy_destroy(chip); out: if (pdata) dev_put(pdata->netdev); @@ -7403,7 +7414,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_ptp_free(chip); }
- mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip); @@ -7416,6 +7426,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); + + mv88e6xxx_phy_destroy(chip); }
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 8bb88b3d900d..ee9e5d7e5277 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -229,7 +229,10 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip) { + mutex_lock(&chip->ppu_mutex); del_timer_sync(&chip->ppu_timer); + cancel_work_sync(&chip->ppu_work); + mutex_unlock(&chip->ppu_mutex); }
int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus, diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c index 2ea64b1d026d..84d7d3f66bd0 100644 --- a/drivers/net/dsa/sja1105/sja1105_ethtool.c +++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c @@ -571,6 +571,9 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data) max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) { + if (!strlen(sja1105_port_counters[i].name)) + continue; + rc = sja1105_port_counter_read(priv, port, i, &data[k++]); if (rc) { dev_err(ds->dev, @@ -596,8 +599,12 @@ void sja1105_get_strings(struct dsa_switch *ds, int port, else max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
- for (i = 0; i < max_ctr; i++) + for (i = 0; i < max_ctr; i++) { + if (!strlen(sja1105_port_counters[i].name)) + continue; + ethtool_puts(&data, sja1105_port_counters[i].name); + } }
int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset) diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index a1f4ca6ad888..08b45fdd1d24 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -61,17 +61,21 @@ enum sja1105_ptp_clk_mode { int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) { struct sja1105_private *priv = ds->priv; + unsigned long hwts_tx_en, hwts_rx_en; struct hwtstamp_config config;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT;
+ hwts_tx_en = priv->hwts_tx_en; + hwts_rx_en = priv->hwts_rx_en; + switch (config.tx_type) { case HWTSTAMP_TX_OFF: - priv->hwts_tx_en &= ~BIT(port); + hwts_tx_en &= ~BIT(port); break; case HWTSTAMP_TX_ON: - priv->hwts_tx_en |= BIT(port); + hwts_tx_en |= BIT(port); break; default: return -ERANGE; @@ -79,15 +83,21 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: - priv->hwts_rx_en &= ~BIT(port); + hwts_rx_en &= ~BIT(port); break; - default: - priv->hwts_rx_en |= BIT(port); + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + hwts_rx_en |= BIT(port); break; + default: + return -ERANGE; }
if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) return -EFAULT; + + priv->hwts_tx_en = hwts_tx_en; + priv->hwts_rx_en = hwts_rx_en; + return 0; }
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index 3d790f8c6f4d..ffece8a400a6 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -1917,8 +1917,10 @@ int sja1105_table_delete_entry(struct sja1105_table *table, int i) if (i > table->entry_count) return -ERANGE;
- memmove(entries + i * entry_size, entries + (i + 1) * entry_size, - (table->entry_count - i) * entry_size); + if (i + 1 < table->entry_count) { + memmove(entries + i * entry_size, entries + (i + 1) * entry_size, + (table->entry_count - i - 1) * entry_size); + }
table->entry_count--;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 55f553debd3b..2cd79b59cf00 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -485,6 +485,17 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) txr = &bp->tx_ring[bp->tx_ring_map[i]]; prod = txr->tx_prod;
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS) + if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) { + netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n", + skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS); + if (skb_linearize(skb)) { + dev_kfree_skb_any(skb); + dev_core_stats_tx_dropped_inc(dev); + return NETDEV_TX_OK; + } + } +#endif free_size = bnxt_tx_avail(bp, txr); if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { /* We must have raced with NAPI cleanup */ @@ -564,7 +575,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) TX_BD_FLAGS_LHINT_512_AND_SMALLER | TX_BD_FLAGS_COAL_NOW | TX_BD_FLAGS_PACKET_END | - (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); + TX_BD_CNT(2));
if (skb->ip_summed == CHECKSUM_PARTIAL) tx_push1->tx_bd_hsize_lflags = @@ -639,7 +650,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_unmap_addr_set(tx_buf, mapping, mapping); flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | - ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); + TX_BD_CNT(last_frag + 2);
txbd->tx_bd_haddr = cpu_to_le64(mapping); txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); @@ -15651,7 +15662,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) cpr = &rxr->bnapi->cp_ring; cpr->sw_stats->rx.rx_resets++;
- for (i = 0; i <= bp->nr_vnics; i++) { + for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i];
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); @@ -15679,7 +15690,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) struct bnxt_vnic_info *vnic; int i;
- for (i = 0; i <= bp->nr_vnics; i++) { + for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; vnic->mru = 0; bnxt_hwrm_vnic_update(bp, vnic, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2373f423a523..d621fb621f30 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -82,6 +82,12 @@ struct tx_bd { #define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\ (bp)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT) + +#define TX_MAX_BD_CNT 32 + +#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2) + struct tx_bd_ext { __le32 tx_bd_hsize_lflags; #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 299822cacca4..d71bad3cfd6b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -48,8 +48,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; - flags = (len << TX_BD_LEN_SHIFT) | - ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | + flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) | bnxt_lhint_arr[len >> 9]; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index b619a3ec245b..04192190beba 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1802,18 +1802,22 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, long value = simple_strtol(buf, NULL, 10); long rc;
+ rtnl_lock(); + if (attr == &veth_active_attr) { if (value && !pool->active) { if (netif_running(netdev)) { if (ibmveth_alloc_buffer_pool(pool)) { netdev_err(netdev, "unable to alloc pool\n"); - return -ENOMEM; + rc = -ENOMEM; + goto unlock_err; } pool->active = 1; ibmveth_close(netdev); - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->active = 1; } @@ -1833,48 +1837,59 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
if (i == IBMVETH_NUM_BUFF_POOLS) { netdev_err(netdev, "no active pool >= MTU\n"); - return -EPERM; + rc = -EPERM; + goto unlock_err; }
if (netif_running(netdev)) { ibmveth_close(netdev); pool->active = 0; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } pool->active = 0; } } else if (attr == &veth_num_attr) { if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { - return -EINVAL; + rc = -EINVAL; + goto unlock_err; } else { if (netif_running(netdev)) { ibmveth_close(netdev); pool->size = value; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->size = value; } } } else if (attr == &veth_size_attr) { if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { - return -EINVAL; + rc = -EINVAL; + goto unlock_err; } else { if (netif_running(netdev)) { ibmveth_close(netdev); pool->buff_size = value; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->buff_size = value; } } } + rtnl_unlock();
/* kick the interrupt handler to allocate/deallocate pools */ ibmveth_interrupt(netdev->irq, netdev); return count; + +unlock_err: + rtnl_unlock(); + return rc; }
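The ibmveth change takes the rtnl lock for the whole sysfs store and funnels every failure through a single unlock_err label, so rtnl_lock() and rtnl_unlock() always pair up. A small userspace sketch of that single-exit locking shape, with a pthread mutex standing in for the rtnl lock and made-up helpers:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static int apply_value(long value)
{
	return value > 0 ? 0 : -22;	/* -EINVAL stand-in */
}

/* Every path, success or failure, releases the lock exactly once. */
static long pool_store(long value)
{
	long rc;

	pthread_mutex_lock(&cfg_lock);

	if (value > 4096) {
		rc = -22;
		goto unlock_err;
	}

	rc = apply_value(value);
	if (rc)
		goto unlock_err;

	pthread_mutex_unlock(&cfg_lock);
	return 0;

unlock_err:
	pthread_mutex_unlock(&cfg_lock);
	return rc;
}

int main(void)
{
	printf("%ld %ld\n", pool_store(64), pool_store(-1));	/* 0 -22 */
	return 0;
}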
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0676fc547b6f..480606d1245e 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4829,6 +4829,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, strscpy(vlcd->name, adapter->netdev->name, len); }
+static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf, + size_t len) +{ + unsigned char hex_str[16 * 3]; + + for (size_t i = 0; i < len; i += 16) { + hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8, + hex_str, sizeof(hex_str), false); + netdev_dbg(dev, "%s\n", hex_str); + } +} + static int send_login(struct ibmvnic_adapter *adapter) { struct ibmvnic_login_rsp_buffer *login_rsp_buffer; @@ -4939,10 +4951,8 @@ static int send_login(struct ibmvnic_adapter *adapter) vnic_add_client_data(adapter, vlcd);
netdev_dbg(adapter->netdev, "Login Buffer:\n"); - for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(adapter->login_buf))[i]); - } + ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf, + adapter->login_buf_sz);
memset(&crq, 0, sizeof(crq)); crq.login.first = IBMVNIC_CRQ_CMD; @@ -5319,15 +5329,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; - int i;
dma_unmap_single(dev, adapter->ip_offload_tok, sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); - for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(buf))[i]); + ibmvnic_print_hex_dump(adapter->netdev, buf, + sizeof(adapter->ip_offload_buf));
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); @@ -5558,10 +5566,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); - for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(adapter->login_rsp_buf))[i]); - } + ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf, + adapter->login_rsp_buf_sz);
/* Sanity checks */ if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 5e2cfa73f889..8294a7c4f122 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -803,4 +803,7 @@ /* SerDes Control */ #define E1000_GEN_POLL_TIMEOUT 640
+#define E1000_FEXTNVM12_PHYPD_CTRL_MASK 0x00C00000 +#define E1000_FEXTNVM12_PHYPD_CTRL_P1 0x00800000 + #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 2f9655cf5dd9..364378133526 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -285,6 +285,45 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) } }
+/** + * e1000_reconfigure_k1_exit_timeout - reconfigure K1 exit timeout to + * align to MTP and later platform requirements. + * @hw: pointer to the HW structure + * + * Context: PHY semaphore must be held by caller. + * Return: 0 on success, negative on failure + */ +static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw) +{ + u16 phy_timeout; + u32 fextnvm12; + s32 ret_val; + + if (hw->mac.type < e1000_pch_mtp) + return 0; + + /* Change Kumeran K1 power down state from P0s to P1 */ + fextnvm12 = er32(FEXTNVM12); + fextnvm12 &= ~E1000_FEXTNVM12_PHYPD_CTRL_MASK; + fextnvm12 |= E1000_FEXTNVM12_PHYPD_CTRL_P1; + ew32(FEXTNVM12, fextnvm12); + + /* Wait for the interface the settle */ + usleep_range(1000, 1100); + + /* Change K1 exit timeout */ + ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG, + &phy_timeout); + if (ret_val) + return ret_val; + + phy_timeout &= ~I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK; + phy_timeout |= 0xF00; + + return e1e_wphy_locked(hw, I217_PHY_TIMEOUTS_REG, + phy_timeout); +} + /** * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds * @hw: pointer to the HW structure @@ -327,15 +366,22 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) * LANPHYPC Value bit to force the interconnect to PCIe mode. */ switch (hw->mac.type) { + case e1000_pch_mtp: + case e1000_pch_lnp: + case e1000_pch_ptp: + case e1000_pch_nvp: + /* At this point the PHY might be inaccessible so don't + * propagate the failure + */ + if (e1000_reconfigure_k1_exit_timeout(hw)) + e_dbg("Failed to reconfigure K1 exit timeout\n"); + + fallthrough; case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: - case e1000_pch_mtp: - case e1000_pch_lnp: - case e1000_pch_ptp: - case e1000_pch_nvp: if (e1000_phy_is_accessible_pchlan(hw)) break;
@@ -419,8 +465,20 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) * the PHY is in. */ ret_val = hw->phy.ops.check_reset_block(hw); - if (ret_val) + if (ret_val) { e_err("ME blocked access to PHY after reset\n"); + goto out; + } + + if (hw->mac.type >= e1000_pch_mtp) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_err("Failed to reconfigure K1 exit timeout\n"); + goto out; + } + ret_val = e1000_reconfigure_k1_exit_timeout(hw); + hw->phy.ops.release(hw); + } }
out: @@ -4888,6 +4946,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) u16 i;
e1000_initialize_hw_bits_ich8lan(hw); + if (hw->mac.type >= e1000_pch_mtp) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_reconfigure_k1_exit_timeout(hw); + hw->phy.ops.release(hw); + if (ret_val) { + e_dbg("Error failed to reconfigure K1 exit timeout\n"); + return ret_val; + } + }
/* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 2504b11c3169..5feb589a9b5f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -219,6 +219,10 @@ #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) #define I217_PLL_CLOCK_GATE_MASK 0x07FF
+/* PHY Timeouts */ +#define I217_PHY_TIMEOUTS_REG PHY_REG(770, 21) +#define I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK 0x0FC0 + #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
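The K1 exit timeout rework boils down to two read-modify-write updates: the FEXTNVM12 PHYPD_CTRL field is switched to the P1 value (masks from defines.h above), and the K1 exit timeout field in I217_PHY_TIMEOUTS_REG is cleared and reprogrammed. A tiny sketch of that clear-field-then-or-in-value idiom using the constants quoted in the diff; field_replace() is only an illustrative helper, not e1000e code:

#include <stdint.h>
#include <stdio.h>

/* Replace the bits selected by 'mask' with 'val'. */
static uint32_t field_replace(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	/* FEXTNVM12: move the 0x00C00000 PHYPD_CTRL field to the P1 value. */
	uint32_t fextnvm12 = field_replace(0, 0x00C00000, 0x00800000);
	/* I217 PHY timeouts: clear the 0x0FC0 K1 exit field, program 0xF00. */
	uint32_t phy_timeouts = field_replace(0, 0x0FC0, 0xF00);

	printf("%08x %04x\n", (unsigned)fextnvm12, (unsigned)phy_timeouts);
	return 0;	/* prints 00800000 0f00 */
}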
/* Inband Control */ diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c index ea40f7941259..19c3d37aa768 100644 --- a/drivers/net/ethernet/intel/ice/devlink/health.c +++ b/drivers/net/ethernet/intel/ice/devlink/health.c @@ -25,10 +25,10 @@ struct ice_health_status { * The below lookup requires to be sorted by code. */
-static const char *const ice_common_port_solutions = +static const char ice_common_port_solutions[] = "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex."; -static const char *const ice_port_number_label = "Port Number"; -static const char *const ice_update_nvm_solution = "Update to the latest NVM image."; +static const char ice_port_number_label[] = "Port Number"; +static const char ice_update_nvm_solution[] = "Update to the latest NVM image.";
static const struct ice_health_status ice_health_status_lookup[] = { {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.", diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 7a2a2e8da8fa..1e801300310e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2271,7 +2271,8 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, caps->nvm_unified_update); break; case ICE_AQC_CAPS_RDMA: - caps->rdma = (number == 1); + if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA)) + caps->rdma = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); break; case ICE_AQC_CAPS_MAX_MTU: diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index e26320ce52ca..a99e0fbd0b8b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -1783,6 +1783,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, 8 + chan + (tmr_idx * 4));
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); + ice_flush(hw);
return 0; } @@ -1843,9 +1844,10 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, div64_u64_rem(start, period, &phase);
/* If we have only phase or start time is in the past, start the timer - * at the next multiple of period, maintaining phase. + * at the next multiple of period, maintaining phase at least 0.5 second + * from now, so we have time to write it to HW. */ - clk = ice_ptp_read_src_clk_reg(pf, NULL); + clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500; if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) start = div64_u64(clk + period - 1, period) * period + phase;
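The ice_ptp hunk adds half a second of margin to the current clock reading before rounding up to the next period boundary, so the computed start time is still in the future by the time it is written to hardware. A worked userspace example of the same arithmetic with made-up period, phase and clock values, using plain 64-bit division instead of the driver's div64 helpers:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
	uint64_t period = 1000000000ULL;	/* 1 s output period */
	uint64_t phase  = 250000000ULL;		/* 0.25 s phase offset */
	uint64_t now    = 5400000000ULL;	/* current PHC time, made up */

	/* Leave 0.5 s of margin so the start time can still be programmed. */
	uint64_t clk = now + 500 * NSEC_PER_MSEC;

	/* Next period boundary at or after 'clk', keeping the phase. */
	uint64_t start = (clk + period - 1) / period * period + phase;

	printf("start=%llu\n", (unsigned long long)start);	/* 6250000000 */
	return 0;
}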
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index ff4ad788d96a..1af51469f070 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -562,7 +562,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) * * check for the valid queue ID */ -static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid) +static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid) { /* allocated Tx and Rx queues should be always equal for VF VSI */ return qid < vsi->alloc_txq; @@ -1862,15 +1862,33 @@ static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
for (i = 0; i < qbw->num_queues; i++) { if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 && - qbw->cfg[i].shaper.peak > vf->max_tx_rate) + qbw->cfg[i].shaper.peak > vf->max_tx_rate) { dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n", qbw->cfg[i].queue_id, vf->vf_id, vf->max_tx_rate); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 && - qbw->cfg[i].shaper.committed < vf->min_tx_rate) + qbw->cfg[i].shaper.committed < vf->min_tx_rate) { dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n", qbw->cfg[i].queue_id, vf->vf_id, - vf->max_tx_rate); + vf->min_tx_rate); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (qbw->cfg[i].queue_id > vf->num_vf_qs) { + dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) { + dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } }
for (i = 0; i < qbw->num_queues; i++) { @@ -1900,13 +1918,21 @@ static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg) */ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) { + u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - u16 quanta_prof_id, quanta_size, start_qid, end_qid, i; struct virtchnl_quanta_cfg *qquanta = (struct virtchnl_quanta_cfg *)msg; struct ice_vsi *vsi; int ret;
+ start_qid = qquanta->queue_select.start_queue_id; + num_queues = qquanta->queue_select.num_queues; + + if (check_add_overflow(start_qid, num_queues, &end_qid)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; @@ -1918,8 +1944,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) goto err; }
- end_qid = qquanta->queue_select.start_queue_id + - qquanta->queue_select.num_queues; if (end_qid > ICE_MAX_RSS_QS_PER_VF || end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n", @@ -1948,7 +1972,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) goto err; }
- start_qid = qquanta->queue_select.start_queue_id; for (i = start_qid; i < end_qid; i++) vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
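ice_vc_cfg_q_quanta() now computes end_qid with check_add_overflow(), so an untrusted start_queue_id/num_queues pair from the VF cannot wrap the u16 sum and slip past the range checks that follow. The kernel helper is built on the compiler's __builtin_add_overflow(); a userspace sketch of the same guard with a placeholder queue limit:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool queue_range_valid(uint16_t start_qid, uint16_t num_queues,
			      uint16_t max_queues)
{
	uint16_t end_qid;

	/* Returns true on overflow, like the kernel's check_add_overflow(). */
	if (__builtin_add_overflow(start_qid, num_queues, &end_qid))
		return false;

	return end_qid <= max_queues;
}

int main(void)
{
	printf("%d\n", queue_range_valid(4, 8, 16));		/* 1: fits */
	printf("%d\n", queue_range_valid(65530, 10, 16));	/* 0: would wrap */
	return 0;
}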
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 14e3f0f89c78..9be4bd717512 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto, struct virtchnl_fdir_fltr_conf *conf) { - u8 *pkt_buf, *msk_buf __free(kfree); + u8 *pkt_buf, *msk_buf __free(kfree) = NULL; struct ice_parser_result rslt; struct ice_pf *pf = vf->pf; + u16 pkt_len, udp_port = 0; struct ice_parser *psr; int status = -ENOMEM; struct ice_hw *hw; - u16 udp_port = 0;
- pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); - msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); + pkt_len = proto->raw.pkt_len; + + if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET) + return -EINVAL; + + pkt_buf = kzalloc(pkt_len, GFP_KERNEL); + msk_buf = kzalloc(pkt_len, GFP_KERNEL); + if (!pkt_buf || !msk_buf) goto err_mem_alloc;
- memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len); - memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len); + memcpy(pkt_buf, proto->raw.spec, pkt_len); + memcpy(msk_buf, proto->raw.mask, pkt_len);
hw = &pf->hw;
@@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf, if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN)) ice_parser_vxlan_tunnel_set(psr, udp_port, true);
- status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt); + status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt); if (status) goto err_parser_destroy;
@@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf, }
status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, - proto->raw.pkt_len, ICE_BLK_FD, + pkt_len, ICE_BLK_FD, conf->prof); if (status) goto err_parser_profile_init; @@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf, ice_parser_profile_dump(hw, conf->prof);
/* Store raw flow info into @conf */ - conf->pkt_len = proto->raw.pkt_len; + conf->pkt_len = pkt_len; conf->pkt_buf = pkt_buf; conf->parser_ena = true;
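The flow-director fix snapshots pkt_len once, rejects zero or oversized lengths before any allocation, and initializes the __free(kfree)-managed pointer to NULL so an early exit cannot free an uninitialized pointer. A userspace sketch of that validate-then-copy shape; MAX_RAW_PACKET below is a placeholder bound, not the VIRTCHNL_MAX_SIZE_RAW_PACKET value:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_RAW_PACKET 1024	/* placeholder, not the virtchnl constant */

static int parse_raw(const uint8_t *spec, const uint8_t *mask, uint16_t pkt_len)
{
	uint8_t *pkt_buf = NULL, *msk_buf = NULL;
	int ret = -1;

	/* Validate the untrusted length before allocating or copying. */
	if (!pkt_len || pkt_len > MAX_RAW_PACKET)
		return -1;

	pkt_buf = calloc(1, pkt_len);
	msk_buf = calloc(1, pkt_len);
	if (!pkt_buf || !msk_buf)
		goto out;

	memcpy(pkt_buf, spec, pkt_len);
	memcpy(msk_buf, mask, pkt_len);
	ret = 0;			/* parsing would happen here */
out:
	free(pkt_buf);
	free(msk_buf);
	return ret;
}

int main(void)
{
	uint8_t spec[64] = { 0 }, mask[64] = { 0 };

	printf("%d %d\n", parse_raw(spec, mask, 64), parse_raw(spec, mask, 0));
	return 0;	/* prints 0 -1 */
}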
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index a3d6b8f198a8..a055a47449f1 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -927,15 +927,19 @@ static int idpf_stop(struct net_device *netdev) static void idpf_decfg_netdev(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; + u16 idx = vport->idx;
kfree(vport->rx_ptype_lkup); vport->rx_ptype_lkup = NULL;
- unregister_netdev(vport->netdev); - free_netdev(vport->netdev); + if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV, + adapter->vport_config[idx]->flags)) { + unregister_netdev(vport->netdev); + free_netdev(vport->netdev); + } vport->netdev = NULL;
- adapter->netdevs[vport->idx] = NULL; + adapter->netdevs[idx] = NULL; }
/** @@ -1536,13 +1540,22 @@ void idpf_init_task(struct work_struct *work) }
for (index = 0; index < adapter->max_vports; index++) { - if (adapter->netdevs[index] && - !test_bit(IDPF_VPORT_REG_NETDEV, - adapter->vport_config[index]->flags)) { - register_netdev(adapter->netdevs[index]); - set_bit(IDPF_VPORT_REG_NETDEV, - adapter->vport_config[index]->flags); + struct net_device *netdev = adapter->netdevs[index]; + struct idpf_vport_config *vport_config; + + vport_config = adapter->vport_config[index]; + + if (!netdev || + test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) + continue; + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n", + index, ERR_PTR(err)); + continue; } + set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags); }
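The idpf init-task rework checks register_netdev()'s return value and only sets the registered flag on success, so teardown never unregisters a netdev that was never registered. A userspace model of that register-and-flag pattern, with invented names and a simulated failure:

#include <stdio.h>
#include <stdbool.h>

#define MAX_PORTS 4

static int register_port(int idx)
{
	return idx == 2 ? -5 : 0;	/* pretend port 2 fails (-EIO stand-in) */
}

int main(void)
{
	bool registered[MAX_PORTS] = { false };

	for (int i = 0; i < MAX_PORTS; i++) {
		int err = register_port(i);

		if (err) {
			fprintf(stderr, "port %d failed: %d\n", i, err);
			continue;	/* keep going; the flag stays clear */
		}
		registered[i] = true;	/* only mark what really succeeded */
	}

	/* Teardown mirrors the flag, so the failed port is never unregistered. */
	for (int i = 0; i < MAX_PORTS; i++)
		if (registered[i])
			printf("unregister port %d\n", i);
	return 0;
}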
/* As all the required vports are created, clear the reset flag diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index b6c515d14cbf..bec4a02c5373 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -87,7 +87,11 @@ static void idpf_remove(struct pci_dev *pdev) */ static void idpf_shutdown(struct pci_dev *pdev) { - idpf_remove(pdev); + struct idpf_adapter *adapter = pci_get_drvdata(pdev); + + cancel_delayed_work_sync(&adapter->vc_event_task); + idpf_vc_core_deinit(adapter); + idpf_deinit_dflt_mbx(adapter);
if (system_state == SYSTEM_POWER_OFF) pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index f94570556120..f323e1c1989f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -509,6 +509,12 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp, PTP_STRICT_FLAGS)) return -EOPNOTSUPP;
+ /* Both the rising and falling edge are timestamped */ + if (rq->extts.flags & PTP_STRICT_FLAGS && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + if (on) { pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, rq->extts.index); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index b8111ad9a9a8..cd1d7b6c1782 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -579,6 +579,7 @@ struct igc_metadata_request { struct xsk_tx_metadata *meta; struct igc_ring *tx_ring; u32 cmd_type; + u16 used_desc; };
struct igc_q_vector { diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 84307bb7313e..706dd26d4dde 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1092,7 +1092,8 @@ static int igc_init_empty_frame(struct igc_ring *ring,
dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); if (dma_mapping_error(ring->dev, dma)) { - netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + net_err_ratelimited("%s: DMA mapping error for empty frame\n", + netdev_name(ring->netdev)); return -ENOMEM; }
@@ -1108,20 +1109,12 @@ static int igc_init_empty_frame(struct igc_ring *ring, return 0; }
-static int igc_init_tx_empty_descriptor(struct igc_ring *ring, - struct sk_buff *skb, - struct igc_tx_buffer *first) +static void igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) { union igc_adv_tx_desc *desc; u32 cmd_type, olinfo_status; - int err; - - if (!igc_desc_unused(ring)) - return -EBUSY; - - err = igc_init_empty_frame(ring, first, skb); - if (err) - return err;
cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | @@ -1140,8 +1133,6 @@ static int igc_init_tx_empty_descriptor(struct igc_ring *ring, ring->next_to_use++; if (ring->next_to_use == ring->count) ring->next_to_use = 0; - - return 0; }
#define IGC_EMPTY_FRAME_SIZE 60 @@ -1567,6 +1558,40 @@ static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *s return false; }
+static int igc_insert_empty_frame(struct igc_ring *tx_ring) +{ + struct igc_tx_buffer *empty_info; + struct sk_buff *empty_skb; + void *data; + int ret; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (unlikely(!empty_skb)) { + net_err_ratelimited("%s: skb alloc error for empty frame\n", + netdev_name(tx_ring->netdev)); + return -ENOMEM; + } + + data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + /* Prepare DMA mapping and Tx buffer information */ + ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb); + if (unlikely(ret)) { + dev_kfree_skb_any(empty_skb); + return ret; + } + + /* Prepare advanced context descriptor for empty packet */ + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + /* Prepare advanced data descriptor for empty packet */ + igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info); + + return 0; +} + static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { @@ -1586,6 +1611,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, + * + 2 desc for inserting an empty packet for launch time, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) @@ -1605,24 +1631,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
if (insert_empty) { - struct igc_tx_buffer *empty_info; - struct sk_buff *empty; - void *data; - - empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; - empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); - if (!empty) - goto done; - - data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); - memset(data, 0, IGC_EMPTY_FRAME_SIZE); - - igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); - - if (igc_init_tx_empty_descriptor(tx_ring, - empty, - empty_info) < 0) - dev_kfree_skb_any(empty); + /* Reset the launch time if the required empty frame fails to + * be inserted. However, this packet is not dropped, so it + * "dirties" the current Qbv cycle. This ensures that the + * upcoming packet, which is scheduled in the next Qbv cycle, + * does not require an empty frame. This way, the launch time + * continues to function correctly despite the current failure + * to insert the empty frame. + */ + if (igc_insert_empty_frame(tx_ring)) + launch_time = 0; }
done: @@ -2953,9 +2971,48 @@ static u64 igc_xsk_fill_timestamp(void *_priv) return *(u64 *)_priv; }
+static void igc_xsk_request_launch_time(u64 launch_time, void *_priv) +{ + struct igc_metadata_request *meta_req = _priv; + struct igc_ring *tx_ring = meta_req->tx_ring; + __le32 launch_time_offset; + bool insert_empty = false; + bool first_flag = false; + u16 used_desc = 0; + + if (!tx_ring->launchtime_enable) + return; + + launch_time_offset = igc_tx_launchtime(tx_ring, + ns_to_ktime(launch_time), + &first_flag, &insert_empty); + if (insert_empty) { + /* Disregard the launch time request if the required empty frame + * fails to be inserted. + */ + if (igc_insert_empty_frame(tx_ring)) + return; + + meta_req->tx_buffer = + &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + /* Inserting an empty packet requires two descriptors: + * one data descriptor and one context descriptor. + */ + used_desc += 2; + } + + /* Use one context descriptor to specify launch time and first flag. */ + igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0); + used_desc += 1; + + /* Update the number of used descriptors in this request */ + meta_req->used_desc += used_desc; +} + const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = { .tmo_request_timestamp = igc_xsk_request_timestamp, .tmo_fill_timestamp = igc_xsk_fill_timestamp, + .tmo_request_launch_time = igc_xsk_request_launch_time, };
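igc_xsk_request_launch_time() may consume extra descriptors behind the zero-copy loop's back (two for an injected empty frame plus one context descriptor for the launch time), so it reports them through used_desc and the loop keeps at least four descriptors free. A small userspace model of that budget accounting; the per-packet numbers mirror the comments in the hunk, everything else is invented:

#include <stdio.h>

struct meta_request {
	int used_desc;		/* descriptors consumed by the metadata hook */
};

/* Worst case per packet: 1 data + 1 context + 2 for an injected empty frame. */
static void request_launch_time(struct meta_request *req, int need_empty_frame)
{
	if (need_empty_frame)
		req->used_desc += 2;	/* empty frame: data + context descriptor */
	req->used_desc += 1;		/* context descriptor with the launch time */
}

int main(void)
{
	int budget = 10, sent = 0;

	while (budget >= 4) {		/* mirror the "always keep 4 free" rule */
		struct meta_request req = { .used_desc = 0 };

		request_launch_time(&req, sent % 2);	/* every other packet */
		budget -= req.used_desc;	/* descriptors used by the hook */
		budget -= 1;			/* the data descriptor itself */
		sent++;
	}
	printf("sent=%d budget_left=%d\n", sent, budget);	/* sent=3 budget_left=2 */
	return 0;
}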
static void igc_xdp_xmit_zc(struct igc_ring *ring) @@ -2978,7 +3035,13 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) ntu = ring->next_to_use; budget = igc_desc_unused(ring);
- while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + /* Packets with launch time require one data descriptor and one context + * descriptor. When the launch time falls into the next Qbv cycle, we + * may need to insert an empty packet, which requires two more + * descriptors. Therefore, to be safe, we always ensure we have at least + * 4 descriptors available. + */ + while (budget >= 4 && xsk_tx_peek_desc(pool, &xdp_desc)) { struct igc_metadata_request meta_req; struct xsk_tx_metadata *meta = NULL; struct igc_tx_buffer *bi; @@ -2999,9 +3062,19 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) meta_req.tx_ring = ring; meta_req.tx_buffer = bi; meta_req.meta = meta; + meta_req.used_desc = 0; xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops, &meta_req);
+ /* xsk_tx_metadata_request() may have updated next_to_use */ + ntu = ring->next_to_use; + + /* xsk_tx_metadata_request() may have updated Tx buffer info */ + bi = meta_req.tx_buffer; + + /* xsk_tx_metadata_request() may use a few descriptors */ + budget -= meta_req.used_desc; + tx_desc = IGC_TX_DESC(ring, ntu); tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); @@ -3019,9 +3092,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) ntu++; if (ntu == ring->count) ntu = 0; + + ring->next_to_use = ntu; + budget--; }
- ring->next_to_use = ntu; if (tx_desc) { igc_flush_tx_descriptors(ring); xsk_tx_release(pool); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c index cb07ecd8937d..00935747c8c5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c @@ -1453,9 +1453,11 @@ enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw) hw->link.link_info.phy_type_low = 0; } else { highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low)); - if (highest_bit) + if (highest_bit) { hw->link.link_info.phy_type_low = BIT_ULL(highest_bit - 1); + hw->link.link_info.phy_type_high = 0; + } } }
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 44fe9b68d1c2..061fcd444d50 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1113,6 +1113,9 @@ struct mvpp2 {
/* Spinlocks for CM3 shared memory configuration */ spinlock_t mss_spinlock; + + /* Spinlock for shared PRS parser memory and shadow table */ + spinlock_t prs_spinlock; };
struct mvpp2_pcpu_stats { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index dd76c1b7ed3a..c63e5f1b168a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7722,8 +7722,9 @@ static int mvpp2_probe(struct platform_device *pdev) if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) priv->hw_version = MVPP23;
- /* Init mss lock */ + /* Init locks for shared packet processor resources */ spin_lock_init(&priv->mss_spinlock); + spin_lock_init(&priv->prs_spinlock);
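The mvpp2 parser series splits each exported helper into a locked wrapper and an unlocked __ variant, so internal callers that already hold prs_spinlock can reuse the logic without deadlocking, with lockdep_assert_held() documenting the contract. A userspace sketch of that wrapper/unlocked split, with a pthread mutex standing in for spin_lock_bh():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t prs_lock = PTHREAD_MUTEX_INITIALIZER;
static int shadow_table[8];

/* Callers must already hold prs_lock (the kernel code asserts this with
 * lockdep_assert_held()). */
static int __prs_read_entry(int tid)
{
	return shadow_table[tid];
}

/* Public entry point: takes the lock, then calls the unlocked helper. */
static int prs_read_entry(int tid)
{
	int val;

	pthread_mutex_lock(&prs_lock);
	val = __prs_read_entry(tid);
	pthread_mutex_unlock(&prs_lock);
	return val;
}

/* A larger locked operation reuses the __ variant without re-locking. */
static int prs_sum_entries(void)
{
	int sum = 0;

	pthread_mutex_lock(&prs_lock);
	for (int tid = 0; tid < 8; tid++)
		sum += __prs_read_entry(tid);
	pthread_mutex_unlock(&prs_lock);
	return sum;
}

int main(void)
{
	shadow_table[3] = 7;
	printf("%d %d\n", prs_read_entry(3), prs_sum_entries());	/* 7 7 */
	return 0;
}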
/* Initialize network controller */ err = mvpp2_init(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 9af22f497a40..93e978bdf303 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i;
+ lockdep_assert_held(&priv->prs_spinlock); + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL;
@@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) }
/* Initialize tcam entry from hw */ -int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, - int tid) +static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i;
+ lockdep_assert_held(&priv->prs_spinlock); + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL;
@@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, return 0; }
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) +{ + int err; + + spin_lock_bh(&priv->prs_spinlock); + err = __mvpp2_prs_init_from_hw(priv, pe, tid); + spin_unlock_bh(&priv->prs_spinlock); + + return err; +} + /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) { @@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); bits = mvpp2_prs_sram_ai_get(&pe);
/* Sram store classification lookup ID in AI bits [5:0] */ @@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) }
/* Set port to unicast or multicast promiscuous mode */ -void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) +static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, + bool add) { struct mvpp2_prs_entry pe; unsigned char cast_match; unsigned int ri; int tid;
+ lockdep_assert_held(&priv->prs_spinlock); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { cast_match = MVPP2_PRS_UCAST_VAL; tid = MVPP2_PE_MAC_UC_PROMISCUOUS; @@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
/* promiscuous mode - Accept unknown unicast or multicast packets */ if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, mvpp2_prs_hw_write(priv, &pe); }
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + spin_lock_bh(&priv->prs_spinlock); + __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add); + spin_unlock_bh(&priv->prs_spinlock); +} + /* Set entry for dsa packets */ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) @@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) @@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); @@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); @@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || @@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); }
/* Update ports' mask */ @@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); }
/* Set default entries for various types of dsa packets */ @@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) struct mvpp2_prs_entry pe; int err;
- priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - /* Double VLAN: 0x88A8, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); @@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue;
- mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid);
mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock); + /* Scan TCAM and see if entry with this <vid,port> already exist */ tid = mvpp2_prs_vid_range_find(port, vid, mask);
@@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
/* There isn't room for a new VID filter */ - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); pe.index = tid; @@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); }
/* Enable the current port */ @@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock); return 0; }
@@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) struct mvpp2 *priv = port->priv; int tid;
- /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + spin_lock_bh(&priv->prs_spinlock);
- /* No such entry */ - if (tid < 0) - return; + /* Invalidate TCAM entry with this <vid,port>, if it exists */ + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + if (tid >= 0) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + }
- mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; + spin_unlock_bh(&priv->prs_spinlock); }
/* Remove all existing VID filters on this port */ @@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) struct mvpp2 *priv = port->priv; int tid;
+ spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (priv->prs_shadow[tid].valid) { @@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) priv->prs_shadow[tid].valid = false; } } + + spin_unlock_bh(&priv->prs_spinlock); }
/* Remove VID filering entry for this port */ @@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv;
+ spin_lock_bh(&priv->prs_spinlock); + /* Invalidate the guard entry */ mvpp2_prs_hw_inv(priv, tid);
priv->prs_shadow[tid].valid = false; + + spin_unlock_bh(&priv->prs_spinlock); }
/* Add guard entry that drops packets when no VID is matched on this port */ @@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock); + pe.index = tid;
reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); @@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + + spin_unlock_bh(&priv->prs_spinlock); }
/* Parser default initialization */ @@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) { int err, index, i;
+ priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + spin_lock_bh(&priv->prs_spinlock); + /* Enable tcam table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
@@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index);
- priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, @@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_vid_init(priv);
err = mvpp2_prs_etype_init(priv); - if (err) - return err; - - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; + err = err ? : mvpp2_prs_vlan_init(pdev, priv); + err = err ? : mvpp2_prs_pppoe_init(priv); + err = err ? : mvpp2_prs_ip6_init(priv); + err = err ? : mvpp2_prs_ip4_init(priv);
- return 0; + spin_unlock_bh(&priv->prs_spinlock); + return err; }
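mvpp2_prs_default_init() now chains the sub-init calls with the GNU "a ? : b" conditional: once err is non-zero the remaining calls are skipped, yet every path still reaches the single spin_unlock_bh(). A tiny userspace demonstration of how that short-circuit behaves:

#include <stdio.h>

static int calls;

static int step(int rc)
{
	calls++;
	return rc;
}

int main(void)
{
	int err = 0;

	/* GNU "a ? : b": if a is non-zero it is returned and b is not evaluated. */
	err = err ? : step(0);		/* runs, err stays 0 */
	err = err ? : step(-5);		/* runs, err becomes -5 */
	err = err ? : step(0);		/* skipped: err already set */

	printf("err=%d calls=%d\n", err, calls);	/* err=-5 calls=2 */
	return 0;
}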
/* Compare MAC DA with tcam entry data */ @@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
if (mvpp2_prs_mac_range_equals(&pe, da, mask) && @@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, }
/* Update parser's mac da entry */ -int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port, + const u8 *da, bool add) { unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct mvpp2 *priv = port->priv; @@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) return 0; }
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + int err; + + spin_lock_bh(&port->priv->prs_spinlock); + err = __mvpp2_prs_mac_da_accept(port, da, add); + spin_unlock_bh(&port->priv->prs_spinlock); + + return err; +} + int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) { struct mvpp2_port *port = netdev_priv(dev); @@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) unsigned long pmap; int index, tid;
+ spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; @@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid);
pmap = mvpp2_prs_tcam_port_map_get(&pe);
@@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) continue;
/* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); + __mvpp2_prs_mac_da_accept(port, da, false); } + + spin_unlock_bh(&priv->prs_spinlock); }
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) { switch (type) { case MVPP2_TAG_TYPE_EDSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); @@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + spin_unlock_bh(&priv->prs_spinlock); break;
case MVPP2_TAG_TYPE_DSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break;
case MVPP2_TAG_TYPE_MH: case MVPP2_TAG_TYPE_NONE: + spin_lock_bh(&priv->prs_spinlock); /* Remove port form EDSA and DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break;
default: @@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock); + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + }
pe.index = tid;
@@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock); return 0; }
@@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&port->priv->prs_spinlock); + tid = mvpp2_prs_flow_find(port->priv, port->id);
/* Such entry not exist */ @@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&port->priv->prs_spinlock); return tid; + }
pe.index = tid;
@@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, &pe);
+ spin_unlock_bh(&port->priv->prs_spinlock); return 0; }
@@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index) if (index > MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL;
+ spin_lock_bh(&priv->prs_spinlock); + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+ spin_unlock_bh(&priv->prs_spinlock); return val; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index cd0d7b7774f1..6575c422635b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2634,7 +2634,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); - vfs -= 64; + vfs = 64; }
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index dab4deca893f..27c3a2daaaa9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -207,7 +207,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu) rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false; }
- for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++) + for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[offs + i]) { free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); rvu->irq_allocated[offs + i] = false; diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c index 09f448f29124..0c244ea5244c 100644 --- a/drivers/net/ethernet/mediatek/airoha_eth.c +++ b/drivers/net/ethernet/mediatek/airoha_eth.c @@ -1547,7 +1547,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1); switch (sport) { - case 0x10 ... 0x13: + case 0x10 ... 0x14: port = 0; break; case 0x2 ... 0x4: @@ -2793,7 +2793,7 @@ static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port, struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params; enum tx_sched_mode mode = TC_SCH_SP; u16 w[AIROHA_NUM_QOS_QUEUES] = {}; - int i, nstrict = 0, nwrr, qidx; + int i, nstrict = 0;
if (p->bands > AIROHA_NUM_QOS_QUEUES) return -EINVAL; @@ -2811,17 +2811,17 @@ static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port, * lowest priorities with respect to SP ones. * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn */ - nwrr = p->bands - nstrict; - qidx = nstrict && nwrr ? nstrict : 0; - for (i = 1; i <= p->bands; i++) { - if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx) + for (i = 0; i < nstrict; i++) { + if (p->priomap[p->bands - i - 1] != i) return -EINVAL; - - qidx = i == nwrr ? 0 : qidx + 1; }
- for (i = 0; i < nwrr; i++) + for (i = 0; i < p->bands - nstrict; i++) { + if (p->priomap[i] != nstrict + i) + return -EINVAL; + w[i] = p->weights[nstrict + i]; + }
if (!nstrict) mode = TC_SCH_WRR8; @@ -3082,7 +3082,7 @@ static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port, return -EINVAL; }
- opt->qid = channel; + opt->qid = AIROHA_NUM_TX_RING + channel;
return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 64b62ed17b07..31eb99f09c63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -423,7 +423,7 @@ u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * - PAGE_SIZE; + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu)); } @@ -827,7 +827,8 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { - int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); @@ -1036,7 +1037,8 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { - int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL)); int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 773624bb2c5d..d68230a7b9f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -884,8 +884,10 @@ static int flow_type_to_traffic_type(u32 flow_type) case ESP_V6_FLOW: return MLX5_TT_IPV6_IPSEC_ESP; case IPV4_FLOW: + case IP_USER_FLOW: return MLX5_TT_IPV4; case IPV6_FLOW: + case IPV6_USER_FLOW: return MLX5_TT_IPV6; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index ed2ba272946b..6c9737c53734 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -1052,6 +1052,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) if (err) { if (shared_fdb || roce_lag) mlx5_lag_add_devices(ldev); + if (shared_fdb) { + mlx5_ldev_for_each(i, 0, ldev) + mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); + }
return; } else if (roce_lag) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index ec956c4bcebd..7c3312d6aed9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1205,24 +1205,24 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
- mlx5_start_health_poll(dev); - err = mlx5_core_enable_hca(dev, 0); if (err) { mlx5_core_err(dev, "enable hca failed\n"); - goto stop_health_poll; + goto err_cmd_cleanup; }
+ mlx5_start_health_poll(dev); + err = mlx5_core_set_issi(dev); if (err) { mlx5_core_err(dev, "failed to set issi\n"); - goto err_disable_hca; + goto stop_health_poll; }
err = mlx5_satisfy_startup_pages(dev, 1); if (err) { mlx5_core_err(dev, "failed to allocate boot pages\n"); - goto err_disable_hca; + goto stop_health_poll; }
err = mlx5_tout_query_dtor(dev); @@ -1235,10 +1235,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); -err_disable_hca: - mlx5_core_disable_hca(dev, 0); stop_health_poll: mlx5_stop_health_poll(dev, boot); + mlx5_core_disable_hca(dev, 0); err_cmd_cleanup: mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_disable(dev); @@ -1249,8 +1248,8 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot) { mlx5_reclaim_startup_pages(dev); - mlx5_core_disable_hca(dev, 0); mlx5_stop_health_poll(dev, boot); + mlx5_core_disable_hca(dev, 0); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_disable(dev); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index a54eedb69a3f..067f0055a55a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -212,7 +212,22 @@ static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = { * This array defines key offsets for easy access when copying key blocks from * entry key to Bloom filter chunk. */ -static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38}; +static char * +mlxsw_sp_acl_bf_enc_key_get(struct mlxsw_sp_acl_atcam_entry *aentry, + u8 chunk_index) +{ + switch (chunk_index) { + case 0: + return &aentry->ht_key.enc_key[2]; + case 1: + return &aentry->ht_key.enc_key[20]; + case 2: + return &aentry->ht_key.enc_key[38]; + default: + WARN_ON_ONCE(1); + return &aentry->ht_key.enc_key[0]; + } +}
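The mlxsw bloom-filter change replaces the chunk_key_offsets[] lookup table with a helper that returns a pointer directly into the encoded key at a fixed per-chunk offset, warning once on an impossible index. A userspace sketch of the same switch-based lookup; the struct and assert() below are stand-ins for the driver's types and WARN_ON_ONCE():

#include <stdio.h>
#include <assert.h>

struct entry_key {
	unsigned char enc_key[64];
};

/* Map a chunk index to its fixed offset inside the encoded key. */
static unsigned char *enc_key_chunk(struct entry_key *key, unsigned int chunk)
{
	switch (chunk) {
	case 0:
		return &key->enc_key[2];
	case 1:
		return &key->enc_key[20];
	case 2:
		return &key->enc_key[38];
	default:
		assert(0);		/* the kernel code uses WARN_ON_ONCE() */
		return &key->enc_key[0];
	}
}

int main(void)
{
	struct entry_key key = { .enc_key = { 0 } };

	key.enc_key[20] = 0xab;
	printf("%02x\n", *enc_key_chunk(&key, 1));	/* ab */
	return 0;
}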
static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c) { @@ -235,9 +250,10 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, u8 key_offset, u8 chunk_key_len, u8 chunk_len) { struct mlxsw_afk_key_info *key_info = aregion->region->key_info; - u8 chunk_index, chunk_count, block_count; + u8 chunk_index, chunk_count; char *chunk = output; __be16 erp_region_id; + u32 block_count;
block_count = mlxsw_afk_key_info_blocks_count_get(key_info); chunk_count = 1 + ((block_count - 1) >> 2); @@ -245,12 +261,13 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, (aregion->region->id << 4)); for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks; chunk_index++) { + char *enc_key; + memset(chunk, 0, pad_bytes); memcpy(chunk + pad_bytes, &erp_region_id, sizeof(erp_region_id)); - memcpy(chunk + key_offset, - &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]], - chunk_key_len); + enc_key = mlxsw_sp_acl_bf_enc_key_get(aentry, chunk_index); + memcpy(chunk + key_offset, enc_key, chunk_key_len); chunk += chunk_len; } *len = chunk_count * chunk_len; diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 4a777b449ecd..0be44dcb3393 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -942,6 +942,12 @@ static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
extts = &ptp->extts[index];
+ if (extts_request->flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + if (on) { extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index); if (extts_pin < 0) diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 6e4ef7af27bf..b4365906669f 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -179,8 +179,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, /* Reject requests with unsupported flags */ if (req->flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) + PTP_FALLING_EDGE)) return -EOPNOTSUPP;
if (req->index) diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 7f7d560cb2b4..3a06e3b1bd6b 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -450,9 +450,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data) net_dev->hw_enc_features |= efx->type->offload_features; net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO; - netif_set_tso_max_segs(net_dev, - ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT); - efx->mdio.dev = net_dev; + nic_data = efx->nic_data; + netif_set_tso_max_size(efx->net_dev, nic_data->tso_max_payload_len); + netif_set_tso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs);
rc = efx_ef100_init_datapath_caps(efx); if (rc < 0) @@ -478,7 +478,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data) /* Don't fail init if RSS setup doesn't work. */ efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
- nic_data = efx->nic_data; rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF, efx->type->is_vf); if (rc) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 62e674d6ff60..3ad95a4c8af2 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -887,8 +887,7 @@ static int ef100_process_design_param(struct efx_nic *efx, case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS: /* We always put HDR_NUM_SEGS=1 in our TSO descriptors */ if (!reader->value) { - netif_err(efx, probe, efx->net_dev, - "TSO_MAX_HDR_NUM_SEGS < 1\n"); + pci_err(efx->pci_dev, "TSO_MAX_HDR_NUM_SEGS < 1\n"); return -EOPNOTSUPP; } return 0; @@ -901,32 +900,28 @@ static int ef100_process_design_param(struct efx_nic *efx, */ if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE || EFX_MIN_DMAQ_SIZE % (u32)reader->value) { - netif_err(efx, probe, efx->net_dev, - "%s size granularity is %llu, can't guarantee safety\n", - reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ", - reader->value); + pci_err(efx->pci_dev, + "%s size granularity is %llu, can't guarantee safety\n", + reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ", + reader->value); return -EOPNOTSUPP; } return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN: nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_LEGACY_MAX_SIZE); - netif_set_tso_max_size(efx->net_dev, - nic_data->tso_max_payload_len); return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS: nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff); - netif_set_tso_max_segs(efx->net_dev, - nic_data->tso_max_payload_num_segs); return 0; case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES: nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff); return 0; case ESE_EF100_DP_GZ_COMPAT: if (reader->value) { - netif_err(efx, probe, efx->net_dev, - "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n", - reader->value); + pci_err(efx->pci_dev, + "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n", + reader->value); return -EOPNOTSUPP; } return 0; @@ -946,10 +941,10 @@ static int ef100_process_design_param(struct efx_nic *efx, * So the value of this shouldn't matter. */ if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT) - netif_dbg(efx, probe, efx->net_dev, - "NIC has other than default VI_STRIDES (mask " - "%#llx), early probing might use wrong one\n", - reader->value); + pci_dbg(efx->pci_dev, + "NIC has other than default VI_STRIDES (mask " + "%#llx), early probing might use wrong one\n", + reader->value); return 0; case ESE_EF100_DP_GZ_RX_MAX_RUNT: /* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't @@ -961,9 +956,9 @@ static int ef100_process_design_param(struct efx_nic *efx, /* Host interface says "Drivers should ignore design parameters * that they do not recognise." 
*/ - netif_dbg(efx, probe, efx->net_dev, - "Ignoring unrecognised design parameter %u\n", - reader->type); + pci_dbg(efx->pci_dev, + "Ignoring unrecognised design parameter %u\n", + reader->type); return 0; } } @@ -999,13 +994,13 @@ static int ef100_check_design_params(struct efx_nic *efx) */ if (reader.state != EF100_TLV_TYPE) { if (reader.state == EF100_TLV_TYPE_CONT) - netif_err(efx, probe, efx->net_dev, - "truncated design parameter (incomplete type %u)\n", - reader.type); + pci_err(efx->pci_dev, + "truncated design parameter (incomplete type %u)\n", + reader.type); else - netif_err(efx, probe, efx->net_dev, - "truncated design parameter %u\n", - reader.type); + pci_err(efx->pci_dev, + "truncated design parameter %u\n", + reader.type); rc = -EIO; } out: diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 650136dfc642..112e55b98ed3 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -474,28 +474,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, } }
-/************************************************************************** - * - * ioctls - * - *************************************************************************/ - -/* Net device ioctl - * Context: process, rtnl_lock() held. - */ -static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - struct mii_ioctl_data *data = if_mii(ifr); - - /* Convert phy_id from older PRTAD/DEVAD format */ - if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && - (data->phy_id & 0xfc00) == 0x0400) - data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; - - return mdio_mii_ioctl(&efx->mdio, data, cmd); -} - /************************************************************************** * * Kernel net device interface @@ -593,7 +571,6 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_eth_ioctl = efx_ioctl, .ndo_change_mtu = efx_change_mtu, .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, @@ -1201,7 +1178,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev, rc = efx_init_struct(efx, pci_dev); if (rc) goto fail1; - efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index ad4694fa3dda..7b236d291d8c 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -17,58 +17,6 @@ #include "selftest.h" #include "mcdi_port_common.h"
-static int efx_mcdi_mdio_read(struct net_device *net_dev, - int prtad, int devad, u16 addr) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN); - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); - - rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) - return rc; - - if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != - MC_CMD_MDIO_STATUS_GOOD) - return -EIO; - - return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); -} - -static int efx_mcdi_mdio_write(struct net_device *net_dev, - int prtad, int devad, u16 addr, u16 value) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN); - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); - - rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) - return rc; - - if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) != - MC_CMD_MDIO_STATUS_GOOD) - return -EIO; - - return 0; -}
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx) { @@ -97,12 +45,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx) { int rc;
- /* Set up MDIO structure for PHY */ - efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - efx->mdio.mdio_read = efx_mcdi_mdio_read; - efx->mdio.mdio_write = efx_mcdi_mdio_write; - - /* Fill out MDIO structure, loopback modes, and initial link state */ + /* Fill out loopback modes and initial link state */ rc = efx_mcdi_phy_probe(efx); if (rc != 0) return rc; diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index 76ea26722ca4..dae684194ac8 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -448,15 +448,6 @@ int efx_mcdi_phy_probe(struct efx_nic *efx) efx->phy_data = phy_data; efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_data->channel; - efx->mdio.prtad = phy_data->port; - efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); - efx->mdio.mode_support = 0; - if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) - efx->mdio.mode_support |= MDIO_SUPPORTS_C22; - if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) - efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) mcdi_to_ethtool_linkset(phy_data->media, caps, @@ -546,8 +537,6 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media); cmd->base.phy_address = phy_cfg->port; cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg); - cmd->base.mdio_support = (efx->mdio.mode_support & - (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap, cmd->link_modes.supported); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f70a7b7d6345..1d3e0f3101d4 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -15,7 +15,7 @@ #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/timer.h> -#include <linux/mdio.h> +#include <linux/mii.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/device.h> @@ -956,8 +956,6 @@ struct efx_mae; * @stats_buffer: DMA buffer for statistics * @phy_type: PHY type * @phy_data: PHY private data (including PHY-specific stats) - * @mdio: PHY MDIO interface - * @mdio_bus: PHY MDIO bus ID (only used by Siena) * @phy_mode: PHY operating mode. Serialised by @mac_lock. * @link_advertising: Autonegotiation advertising flags * @fec_config: Forward Error Correction configuration flags. For bit positions @@ -1131,8 +1129,6 @@ struct efx_nic {
unsigned int phy_type; void *phy_data; - struct mdio_if_info mdio; - unsigned int mdio_bus; enum efx_phy_mode phy_mode;
__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 2b3d6586f44a..497abf2723a5 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -1082,26 +1082,6 @@ static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); }
-static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr) -{ - struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); - - *nexthdr = hdr->nexthdr; - offset += sizeof(struct ipv6hdr); - while (ipv6_ext_hdr(*nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - if (*nexthdr == NEXTHDR_NONE) - return; - hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); - if (!hp) - return; - if (*nexthdr == NEXTHDR_FRAGMENT) - break; - *nexthdr = hp->nexthdr; - } -} - union network_header { struct iphdr *ipv4; struct ipv6hdr *ipv6; @@ -1112,6 +1092,8 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) { u8 tun_prot = 0, l4_prot = 0, ptype = 0; struct sk_buff *skb = first->skb; + unsigned char *exthdr, *l4_hdr; + __be16 frag_off;
if (skb->encapsulation) { union network_header hdr; @@ -1122,14 +1104,18 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) ptype = WX_PTYPE_TUN_IPV4; break; case htons(ETH_P_IPV6): - wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off); ptype = WX_PTYPE_TUN_IPV6; break; default: return ptype; }
- if (tun_prot == IPPROTO_IPIP) { + if (tun_prot == IPPROTO_IPIP || tun_prot == IPPROTO_IPV6) { hdr.raw = (void *)inner_ip_hdr(skb); ptype |= WX_PTYPE_PKT_IPIP; } else if (tun_prot == IPPROTO_UDP) { @@ -1166,7 +1152,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) l4_prot = hdr.ipv4->protocol; break; case 6: - wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot); + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); ptype |= WX_PTYPE_PKT_IPV6; break; default: @@ -1179,7 +1169,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) ptype = WX_PTYPE_PKT_IP; break; case htons(ETH_P_IPV6): - wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6; break; default: @@ -1269,13 +1263,20 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ if (enc) { + unsigned char *exthdr, *l4_hdr; + __be16 frag_off; + switch (first->protocol) { case htons(ETH_P_IP): tun_prot = ip_hdr(skb)->protocol; first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4; break; case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off); break; default: break; @@ -1298,6 +1299,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - (char *)ip_hdr(skb)) >> 2) << WX_TXD_OUTER_IPLEN_SHIFT; @@ -1335,12 +1337,15 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, u8 tun_prot = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) && !(first->tx_flags & WX_TX_FLAGS_CC)) return; vlan_macip_lens = skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT; } else { + unsigned char *exthdr, *l4_hdr; + __be16 frag_off; u8 l4_prot = 0; union { struct iphdr *ipv4; @@ -1362,7 +1367,12 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, tun_prot = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); break; default: return; @@ -1386,6 +1396,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - (char *)ip_hdr(skb)) >> 2) << WX_TXD_OUTER_IPLEN_SHIFT; @@ -1408,7 +1419,10 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, break; case 6: vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); break; default: break; @@ -1428,7 +1442,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_L4LEN_SHIFT; break; default: - break; + skb_checksum_help(skb); + goto csum_failed; }
/* update TX checksum flag */ diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c index b4ef386bdb1b..7c017fe35522 100644 --- a/drivers/net/ipvlan/ipvlan_l3s.c +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -226,5 +226,4 @@ void ipvlan_l3s_unregister(struct ipvl_port *port)
 	dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
 	ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
-	dev->l3mdev_ops = NULL;
 }
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index 208e8f561e06..eba8b5fb1365 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -597,7 +597,8 @@ static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
 
 	period = BCM_MAX_PERIOD_8NS;	/* write nonzero value */
 
-	if (req->flags & PTP_PEROUT_PHASE)
+	/* Reject unsupported flags */
+	if (req->flags & ~PTP_PEROUT_DUTY_CYCLE)
 		return -EOPNOTSUPP;
if (req->flags & PTP_PEROUT_DUTY_CYCLE) diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 22edb7e4c1a1..5a963b2b7ea7 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -859,7 +859,7 @@ static int brcm_fet_config_init(struct phy_device *phydev) return reg;
 	/* Unmask events we are interested in and mask interrupts globally. */
-	if (phydev->phy_id == PHY_ID_BCM5221)
+	if (phydev->drv->phy_id == PHY_ID_BCM5221)
 		reg = MII_BRCM_FET_IR_ENABLE | MII_BRCM_FET_IR_MASK;
 	else
@@ -888,7 +888,7 @@ static int brcm_fet_config_init(struct phy_device *phydev)
 		return err;
 	}
 
-	if (phydev->phy_id != PHY_ID_BCM5221) {
+	if (phydev->drv->phy_id != PHY_ID_BCM5221) {
 		/* Set the LED mode */
 		reg = __phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4);
 		if (reg < 0) {
@@ -1009,7 +1009,7 @@ static int brcm_fet_suspend(struct phy_device *phydev)
 		return err;
 	}
 
-	if (phydev->phy_id == PHY_ID_BCM5221)
+	if (phydev->drv->phy_id == PHY_ID_BCM5221)
 		/* Force Low Power Mode with clock enabled */
 		reg = BCM5221_SHDW_AM4_EN_CLK_LPM | BCM5221_SHDW_AM4_FORCE_LPM;
 	else
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 7b3739b29c8f..bb0bf1415872 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -630,6 +630,16 @@ static const struct driver_info zte_rndis_info = {
 	.tx_fixup = rndis_tx_fixup,
 };
+static const struct driver_info wwan_rndis_info = { + .description = "Mobile Broadband RNDIS device", + .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, + .bind = rndis_bind, + .unbind = rndis_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, +}; + /*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = { @@ -666,9 +676,11 @@ static const struct usb_device_id products [] = { USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long) &rndis_info, }, { - /* Novatel Verizon USB730L */ + /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and + * Telit FN990A (RNDIS) + */ USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), - .driver_info = (unsigned long) &rndis_info, + .driver_info = (unsigned long)&wwan_rndis_info, }, { }, // END }; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index aeab2308b150..724b93aa4f7e 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -530,7 +530,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) netif_device_present (dev->net) && test_bit(EVENT_DEV_OPEN, &dev->flags) && !test_bit (EVENT_RX_HALT, &dev->flags) && - !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { + !test_bit (EVENT_DEV_ASLEEP, &dev->flags) && + !usbnet_going_away(dev)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { case -EPIPE: usbnet_defer_kevent (dev, EVENT_RX_HALT); @@ -551,8 +552,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) tasklet_schedule (&dev->bh); break; case 0: - if (!usbnet_going_away(dev)) - __usbnet_queue_skb(&dev->rxq, skb, rx_start); + __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7646ddd9bef7..d1ed544ba03a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -368,15 +368,15 @@ struct receive_queue { */ #define VIRTIO_NET_RSS_MAX_KEY_SIZE 40 struct virtio_net_ctrl_rss { - u32 hash_types; - u16 indirection_table_mask; - u16 unclassified_queue; - u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */ - u16 max_tx_vq; + __le32 hash_types; + __le16 indirection_table_mask; + __le16 unclassified_queue; + __le16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */ + __le16 max_tx_vq; u8 hash_key_length; u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
- u16 *indirection_table; + __le16 *indirection_table; };
/* Control VQ buffers: protected by the rtnl lock */ @@ -3576,9 +3576,9 @@ static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pair
for (; i < vi->rss_indir_table_size; ++i) { indir_val = ethtool_rxfh_indir_default(i, queue_pairs); - vi->rss.indirection_table[i] = indir_val; + vi->rss.indirection_table[i] = cpu_to_le16(indir_val); } - vi->rss.max_tx_vq = queue_pairs; + vi->rss.max_tx_vq = cpu_to_le16(queue_pairs); }
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) @@ -4097,10 +4097,10 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
static void virtnet_init_default_rss(struct virtnet_info *vi) { - vi->rss.hash_types = vi->rss_hash_types_supported; + vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_supported); vi->rss_hash_types_saved = vi->rss_hash_types_supported; vi->rss.indirection_table_mask = vi->rss_indir_table_size - ? vi->rss_indir_table_size - 1 : 0; + ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0; vi->rss.unclassified_queue = 0;
virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); @@ -4218,7 +4218,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
if (new_hashtypes != vi->rss_hash_types_saved) { vi->rss_hash_types_saved = new_hashtypes; - vi->rss.hash_types = vi->rss_hash_types_saved; + vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_saved); if (vi->dev->features & NETIF_F_RXHASH) return virtnet_commit_rss_command(vi); } @@ -5398,7 +5398,7 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) { for (i = 0; i < vi->rss_indir_table_size; ++i) - rxfh->indir[i] = vi->rss.indirection_table[i]; + rxfh->indir[i] = le16_to_cpu(vi->rss.indirection_table[i]); }
if (rxfh->key) @@ -5426,7 +5426,7 @@ static int virtnet_set_rxfh(struct net_device *dev, return -EOPNOTSUPP;
for (i = 0; i < vi->rss_indir_table_size; ++i) - vi->rss.indirection_table[i] = rxfh->indir[i]; + vi->rss.indirection_table[i] = cpu_to_le16(rxfh->indir[i]); update = true; }
@@ -6044,9 +6044,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) { if (features & NETIF_F_RXHASH) - vi->rss.hash_types = vi->rss_hash_types_saved; + vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_saved); else - vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; + vi->rss.hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
if (!virtnet_commit_rss_command(vi)) return -EINVAL; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 6793fa09f9d1..3df6aabc7e33 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2033,6 +2033,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
rq->comp_ring.gen = VMXNET3_INIT_GEN; rq->comp_ring.next2proc = 0; + + if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) + xdp_rxq_info_unreg(&rq->xdp_rxq); + page_pool_destroy(rq->page_pool); + rq->page_pool = NULL; }
@@ -2073,11 +2078,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } }
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) - xdp_rxq_info_unreg(&rq->xdp_rxq); - page_pool_destroy(rq->page_pool); - rq->page_pool = NULL; - if (rq->data_ring.base) { dma_free_coherent(&adapter->pdev->dev, rq->rx_ring[0].size * rq->data_ring.desc_size, diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 029ecf51c9ef..b8b3dce9cdb5 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -4783,7 +4783,7 @@ u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, if (!msdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "msdu_pop: invalid buf_id %d\n", buf_id); - break; + goto next_msdu; } rxcb = ATH11K_SKB_RXCB(msdu); if (!rxcb->unmapped) { @@ -5148,7 +5148,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; const struct ath11k_hw_hal_params *hal_params; void *ring_entry; - void *mon_dst_srng; + struct hal_srng *mon_dst_srng; u32 ppdu_id; u32 rx_bufs_used; u32 ring_id; @@ -5165,6 +5165,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
spin_lock_bh(&pmon->mon_lock);
+ spin_lock_bh(&mon_dst_srng->lock); ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
ppdu_id = pmon->mon_ppdu_info.ppdu_id; @@ -5223,6 +5224,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, mon_dst_srng); } ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); + spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
@@ -5410,7 +5412,7 @@ ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar, "full mon msdu_pop: invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); - break; + goto next_msdu; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); @@ -5612,7 +5614,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, struct hal_sw_mon_ring_entries *sw_mon_entries; struct ath11k_pdev_mon_stats *rx_mon_stats; struct sk_buff *head_msdu, *tail_msdu; - void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; + struct hal_srng *mon_dst_srng; void *ring_entry; u32 rx_bufs_used = 0, mpdu_rx_bufs_used; int quota = 0, ret; @@ -5628,6 +5630,9 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, goto reap_status_ring; }
+ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; + spin_lock_bh(&mon_dst_srng->lock); + ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { head_msdu = NULL; @@ -5671,6 +5676,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, }
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); + spin_unlock_bh(&mon_dst_srng->lock); spin_unlock_bh(&pmon->mon_lock);
if (rx_bufs_used) { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 1556392f7ad4..1298a3190a3c 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -5336,8 +5336,6 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; - if (nsts > (ar->num_rx_chains - 1)) - nsts = ar->num_rx_chains - 1; value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); }
@@ -5421,9 +5419,6 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
/* Enable Beamformee STS Field only if SU BF is enabled */ if (subfee) { - if (nsts > (ar->num_rx_chains - 1)) - nsts = ar->num_rx_chains - 1; - nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; *vht_cap |= nsts; diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index b93f04973ad7..eaac9eabcc70 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -939,6 +939,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev, return 0;
err_free_irq: + /* __free_irq() expects the caller to have cleared the affinity hint */ + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); ath11k_pcic_free_irq(ab);
err_ce_free: diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index b0f289784dd3..7bfe47ad62a0 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/rtnetlink.h>
@@ -55,6 +55,19 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) { + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "driver initiated regd update\n"); + if (ar->state != ATH11K_STATE_ON) + return; + + ret = ath11k_reg_update_chan_list(ar, true); + if (ret) + ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret); + + return; + } + /* Currently supporting only General User Hints. Cell base user * hints to be handled later. * Hints from other sources like Core, Beacons are not expected for @@ -293,12 +306,6 @@ int ath11k_regd_update(struct ath11k *ar) if (ret) goto err;
- if (ar->state == ATH11K_STATE_ON) { - ret = ath11k_reg_update_chan_list(ar, true); - if (ret) - goto err; - } - return 0; err: ath11k_warn(ab, "failed to perform regd update : %d\n", ret); @@ -977,6 +984,7 @@ void ath11k_regd_update_work(struct work_struct *work) void ath11k_reg_init(struct ath11k *ar) { ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; + ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER; ar->hw->wiphy->reg_notifier = ath11k_reg_notifier; }
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 0606116d6b9c..212cd935e60a 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -1122,16 +1122,18 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab) ath12k_core_stop(ab); mutex_unlock(&ab->core_lock); } + mutex_unlock(&ag->mutex); goto exit;
err_dp_free: ath12k_dp_free(ab); mutex_unlock(&ab->core_lock); + mutex_unlock(&ag->mutex); + err_firmware_stop: ath12k_qmi_firmware_stop(ab);
exit: - mutex_unlock(&ag->mutex); return ret; }
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index dad35bfd83f6..68d609f2ac60 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -4032,7 +4032,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, hw_links[hw_link_id].pdev_idx); ar = partner_ab->pdevs[pdev_id].ar;
- if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) { + if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) { dev_kfree_skb_any(msdu); continue; } diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index a8d341a6df01..1fffabaca527 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -368,6 +368,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, add_htt_metadata = true; msdu_ext_desc = true; ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW); + ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT; ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW; ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN; } @@ -398,6 +399,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, if (ret < 0) { ath12k_dbg(ab, ATH12K_DBG_DP_TX, "Failed to add HTT meta data, dropping packet\n"); + kfree_skb(skb_ext_desc); goto fail_unmap_dma; } } diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 2d062b5904a8..9c3e66dbe0c3 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -8066,6 +8066,7 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id]; struct ath12k_base *ab = ar->ab; + struct ieee80211_bss_conf *link_conf;
int ret;
@@ -8084,7 +8085,13 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif }
if (cache->bss_conf_changed) { - ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf, + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in cache flush for vif %pM link %u\n", + vif->addr, arvif->link_id); + return; + } + ath12k_mac_bss_info_changed(ar, arvif, link_conf, cache->bss_conf_changed); }
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 06cff3849ab8..2851f6944b86 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -1689,6 +1689,8 @@ static int ath12k_pci_probe(struct pci_dev *pdev, return 0;
err_free_irq: + /* __free_irq() expects the caller to have cleared the affinity hint */ + ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); ath12k_pci_free_irq(ab);
err_ce_free: diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index abb510d235a5..7a87777e0a04 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -2794,6 +2794,8 @@ int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar, WMI_CHAN_REG_INFO1_REG_CLS); *reg2 |= le32_encode_bits(channel_arg->antennamax, WMI_CHAN_REG_INFO2_ANT_MAX); + *reg2 |= le32_encode_bits(channel_arg->maxregpower, + WMI_CHAN_REG_INFO2_MAX_TX_PWR);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 628eeec4b82f..300d178830ad 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -628,12 +628,12 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h else RX_STAT_INC(sc, rx_spectral_sample_err);
- memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); - /* Mix the received bins to the /dev/random * pool */ add_device_randomness(sample_buf, num_bins); + + memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); }
/* Process a normal frame */ diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 4a96281792cc..91458f3bd14a 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -454,6 +454,11 @@ enum mwifiex_channel_flags { #define HostCmd_RET_BIT 0x8000 #define HostCmd_ACT_GEN_GET 0x0000 #define HostCmd_ACT_GEN_SET 0x0001 +#define HOST_CMD_ACT_GEN_SET 0x0001 +/* Add this non-CamelCase-style macro to comply with checkpatch requirements. + * This macro will eventually replace all existing CamelCase-style macros in + * the future for consistency. + */ #define HostCmd_ACT_GEN_REMOVE 0x0004 #define HostCmd_ACT_BITWISE_SET 0x0002 #define HostCmd_ACT_BITWISE_CLR 0x0003 @@ -2352,6 +2357,14 @@ struct host_cmd_ds_add_station { u8 tlv[]; } __packed;
+#define MWIFIEX_CFG_TYPE_CAL 0x2 + +struct host_cmd_ds_802_11_cfg_data { + __le16 action; + __le16 type; + __le16 data_len; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2431,6 +2444,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl; struct host_cmd_ds_sta_configure sta_cfg; struct host_cmd_ds_add_station sta_info; + struct host_cmd_ds_802_11_cfg_data cfg_data; } params; } __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 855019fe5485..80fc6d5afe86 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -691,10 +691,6 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
init_failed = true; done: - if (adapter->cal_data) { - release_firmware(adapter->cal_data); - adapter->cal_data = NULL; - } if (adapter->firmware) { release_firmware(adapter->firmware); adapter->firmware = NULL; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index e2800a831c8e..c4689f5a1acc 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1507,6 +1507,7 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, u32 len; u8 *data = (u8 *)cmd + S_DS_GEN; int ret; + struct host_cmd_ds_802_11_cfg_data *pcfg_data;
if (prop) { len = prop->length; @@ -1514,12 +1515,20 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, data, len); if (ret) return ret; + + cmd->size = cpu_to_le16(S_DS_GEN + len); mwifiex_dbg(adapter, INFO, "download cfg_data from device tree: %s\n", prop->name); } else if (adapter->cal_data->data && adapter->cal_data->size > 0) { len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data, - adapter->cal_data->size, data); + adapter->cal_data->size, + data + sizeof(*pcfg_data)); + pcfg_data = &cmd->params.cfg_data; + pcfg_data->action = cpu_to_le16(HOST_CMD_ACT_GEN_SET); + pcfg_data->type = cpu_to_le16(MWIFIEX_CFG_TYPE_CAL); + pcfg_data->data_len = cpu_to_le16(len); + cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*pcfg_data) + len); mwifiex_dbg(adapter, INFO, "download cfg_data from config file\n"); } else { @@ -1527,7 +1536,6 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, }
cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA); - cmd->size = cpu_to_le16(S_DS_GEN + len);
return 0; } @@ -2293,9 +2301,13 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) "marvell,caldata"); }
- if (adapter->cal_data) + if (adapter->cal_data) { mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA, HostCmd_ACT_GEN_SET, 0, NULL, true); + release_firmware(adapter->cal_data); + adapter->cal_data = NULL; + } +
/* Read MAC address from HW */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 578013884e43..4fec7d000a63 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -303,9 +303,9 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) phy->mib.dl_vht_3mu_cnt, phy->mib.dl_vht_4mu_cnt);
- sub_total_cnt = phy->mib.dl_vht_2mu_cnt + - phy->mib.dl_vht_3mu_cnt + - phy->mib.dl_vht_4mu_cnt; + sub_total_cnt = (u64)phy->mib.dl_vht_2mu_cnt + + phy->mib.dl_vht_3mu_cnt + + phy->mib.dl_vht_4mu_cnt;
seq_printf(file, "\nTotal non-HE MU-MIMO DL PPDU count: %lld", sub_total_cnt); @@ -353,26 +353,27 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) phy->mib.dl_he_9to16ru_cnt, phy->mib.dl_he_gtr16ru_cnt);
- sub_total_cnt = phy->mib.dl_he_2mu_cnt + - phy->mib.dl_he_3mu_cnt + - phy->mib.dl_he_4mu_cnt; + sub_total_cnt = (u64)phy->mib.dl_he_2mu_cnt + + phy->mib.dl_he_3mu_cnt + + phy->mib.dl_he_4mu_cnt; total_ppdu_cnt = sub_total_cnt;
seq_printf(file, "\nTotal HE MU-MIMO DL PPDU count: %lld", sub_total_cnt);
- sub_total_cnt = phy->mib.dl_he_2ru_cnt + - phy->mib.dl_he_3ru_cnt + - phy->mib.dl_he_4ru_cnt + - phy->mib.dl_he_5to8ru_cnt + - phy->mib.dl_he_9to16ru_cnt + - phy->mib.dl_he_gtr16ru_cnt; + sub_total_cnt = (u64)phy->mib.dl_he_2ru_cnt + + phy->mib.dl_he_3ru_cnt + + phy->mib.dl_he_4ru_cnt + + phy->mib.dl_he_5to8ru_cnt + + phy->mib.dl_he_9to16ru_cnt + + phy->mib.dl_he_gtr16ru_cnt; total_ppdu_cnt += sub_total_cnt;
seq_printf(file, "\nTotal HE OFDMA DL PPDU count: %lld", sub_total_cnt);
- total_ppdu_cnt += phy->mib.dl_he_su_cnt + phy->mib.dl_he_ext_su_cnt; + total_ppdu_cnt += (u64)phy->mib.dl_he_su_cnt + + phy->mib.dl_he_ext_su_cnt;
seq_printf(file, "\nAll HE DL PPDU count: %lld", total_ppdu_cnt);
@@ -404,20 +405,20 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) phy->mib.ul_hetrig_9to16ru_cnt, phy->mib.ul_hetrig_gtr16ru_cnt);
- sub_total_cnt = phy->mib.ul_hetrig_2mu_cnt + - phy->mib.ul_hetrig_3mu_cnt + - phy->mib.ul_hetrig_4mu_cnt; + sub_total_cnt = (u64)phy->mib.ul_hetrig_2mu_cnt + + phy->mib.ul_hetrig_3mu_cnt + + phy->mib.ul_hetrig_4mu_cnt; total_ppdu_cnt = sub_total_cnt;
seq_printf(file, "\nTotal HE MU-MIMO UL TB PPDU count: %lld", sub_total_cnt);
- sub_total_cnt = phy->mib.ul_hetrig_2ru_cnt + - phy->mib.ul_hetrig_3ru_cnt + - phy->mib.ul_hetrig_4ru_cnt + - phy->mib.ul_hetrig_5to8ru_cnt + - phy->mib.ul_hetrig_9to16ru_cnt + - phy->mib.ul_hetrig_gtr16ru_cnt; + sub_total_cnt = (u64)phy->mib.ul_hetrig_2ru_cnt + + phy->mib.ul_hetrig_3ru_cnt + + phy->mib.ul_hetrig_4ru_cnt + + phy->mib.ul_hetrig_5to8ru_cnt + + phy->mib.ul_hetrig_9to16ru_cnt + + phy->mib.ul_hetrig_gtr16ru_cnt; total_ppdu_cnt += sub_total_cnt;
seq_printf(file, "\nTotal HE OFDMA UL TB PPDU count: %lld", diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 13e58c328aff..78b77a54d195 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -811,6 +811,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx; msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET; msta->deflink.last_txs = jiffies; + msta->deflink.sta = msta;
ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 15815ad84713..9e192b7e1d2e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -3155,7 +3155,6 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
.idx = idx, .env = env_cap, - .acpi_conf = mt792x_acpi_get_flags(&dev->phy), }; int ret, valid_cnt = 0; u8 i, *pos; diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index ff4894c7fa8a..93e41def81b4 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -5135,7 +5135,7 @@ struct rtw89_tssi_info { u32 alignment_backup_by_ch[RF_PATH_MAX][TSSI_MAX_CH_NUM][TSSI_ALIMK_VALUE_NUM]; u32 alignment_value[RF_PATH_MAX][TSSI_ALIMK_MAX][TSSI_ALIMK_VALUE_NUM]; bool alignment_done[RF_PATH_MAX][TSSI_ALIMK_MAX]; - u32 tssi_alimk_time; + u64 tssi_alimk_time; };
struct rtw89_power_trim_info { diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 5d4ad23cc3bd..2f3869c70069 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -988,7 +988,7 @@ int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
if ((bitmap & needed_bitmap) != needed_bitmap) { - rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %0x8x\n", + rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n", needed_bitmap, bitmap); return -ENOENT; } @@ -5311,6 +5311,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; u8 opch_size = sizeof(*opch) * option->num_opch; u8 probe_id[NUM_NL80211_BANDS]; + u8 scan_offload_ver = U8_MAX; u8 cfg_len = sizeof(*h2c); unsigned int cond; u8 ver = U8_MAX; @@ -5321,6 +5322,11 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
rtw89_scan_get_6g_disabled_chan(rtwdev, option);
+ if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { + cfg_len = offsetofend(typeof(*h2c), w8); + scan_offload_ver = 0; + } + len = cfg_len + macc_role_size + opch_size; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { @@ -5392,10 +5398,8 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); }
- if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { - cfg_len = offsetofend(typeof(*h2c), w8); + if (scan_offload_ver == 0) goto flex_member; - }
h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 4d11c3dd60a5..79fef5f90140 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -455,34 +455,36 @@ #define B_BE_RX0DMA_INT_EN BIT(0)
#define R_BE_HAXI_HISR00 0xB0B4 -#define B_BE_RDU_CH6_INT BIT(28) -#define B_BE_RDU_CH5_INT BIT(27) -#define B_BE_RDU_CH4_INT BIT(26) -#define B_BE_RDU_CH2_INT BIT(25) -#define B_BE_RDU_CH1_INT BIT(24) -#define B_BE_RDU_CH0_INT BIT(23) -#define B_BE_RXDMA_STUCK_INT BIT(22) -#define B_BE_TXDMA_STUCK_INT BIT(21) -#define B_BE_TXDMA_CH14_INT BIT(20) -#define B_BE_TXDMA_CH13_INT BIT(19) -#define B_BE_TXDMA_CH12_INT BIT(18) -#define B_BE_TXDMA_CH11_INT BIT(17) -#define B_BE_TXDMA_CH10_INT BIT(16) -#define B_BE_TXDMA_CH9_INT BIT(15) -#define B_BE_TXDMA_CH8_INT BIT(14) -#define B_BE_TXDMA_CH7_INT BIT(13) -#define B_BE_TXDMA_CH6_INT BIT(12) -#define B_BE_TXDMA_CH5_INT BIT(11) -#define B_BE_TXDMA_CH4_INT BIT(10) -#define B_BE_TXDMA_CH3_INT BIT(9) -#define B_BE_TXDMA_CH2_INT BIT(8) -#define B_BE_TXDMA_CH1_INT BIT(7) -#define B_BE_TXDMA_CH0_INT BIT(6) -#define B_BE_RPQ1DMA_INT BIT(5) -#define B_BE_RX1P1DMA_INT BIT(4) +#define B_BE_RDU_CH5_INT_V1 BIT(30) +#define B_BE_RDU_CH4_INT_V1 BIT(29) +#define B_BE_RDU_CH3_INT_V1 BIT(28) +#define B_BE_RDU_CH2_INT_V1 BIT(27) +#define B_BE_RDU_CH1_INT_V1 BIT(26) +#define B_BE_RDU_CH0_INT_V1 BIT(25) +#define B_BE_RXDMA_STUCK_INT_V1 BIT(24) +#define B_BE_TXDMA_STUCK_INT_V1 BIT(23) +#define B_BE_TXDMA_CH14_INT_V1 BIT(22) +#define B_BE_TXDMA_CH13_INT_V1 BIT(21) +#define B_BE_TXDMA_CH12_INT_V1 BIT(20) +#define B_BE_TXDMA_CH11_INT_V1 BIT(19) +#define B_BE_TXDMA_CH10_INT_V1 BIT(18) +#define B_BE_TXDMA_CH9_INT_V1 BIT(17) +#define B_BE_TXDMA_CH8_INT_V1 BIT(16) +#define B_BE_TXDMA_CH7_INT_V1 BIT(15) +#define B_BE_TXDMA_CH6_INT_V1 BIT(14) +#define B_BE_TXDMA_CH5_INT_V1 BIT(13) +#define B_BE_TXDMA_CH4_INT_V1 BIT(12) +#define B_BE_TXDMA_CH3_INT_V1 BIT(11) +#define B_BE_TXDMA_CH2_INT_V1 BIT(10) +#define B_BE_TXDMA_CH1_INT_V1 BIT(9) +#define B_BE_TXDMA_CH0_INT_V1 BIT(8) +#define B_BE_RX1P1DMA_INT_V1 BIT(7) +#define B_BE_RX0P1DMA_INT_V1 BIT(6) +#define B_BE_RO1DMA_INT BIT(5) +#define B_BE_RP1DMA_INT BIT(4) #define B_BE_RX1DMA_INT BIT(3) -#define B_BE_RPQ0DMA_INT BIT(2) -#define B_BE_RX0P1DMA_INT BIT(1) +#define B_BE_RO0DMA_INT BIT(2) +#define B_BE_RP0DMA_INT BIT(1) #define B_BE_RX0DMA_INT BIT(0)
/* TX/RX */ diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c index cd39eebe8186..12e6a0cbb889 100644 --- a/drivers/net/wireless/realtek/rtw89/pci_be.c +++ b/drivers/net/wireless/realtek/rtw89/pci_be.c @@ -666,7 +666,7 @@ SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be); EXPORT_SYMBOL(rtw89_pm_ops_be);
const struct rtw89_pci_gen_def rtw89_pci_gen_be = { - .isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT, + .isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1, .isr_halt_c2h = B_BE_HALT_C2H_INT, .isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT, .isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1}, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c index ef47a5facc83..fbf82d42687b 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c @@ -3585,9 +3585,10 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); struct rtw8852bx_bb_tssi_bak tssi_bak; s32 aliment_diff, tssi_cw_default; - u32 start_time, finish_time; u32 bb_reg_backup[8] = {0}; + ktime_t start_time; const s16 *power; + s64 this_time; u8 band; bool ok; u32 tmp; @@ -3613,7 +3614,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, return; }
- start_time = ktime_get_ns(); + start_time = ktime_get();
if (chan->band_type == RTW89_BAND_2G) power = power_2g; @@ -3738,12 +3739,12 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
- finish_time = ktime_get_ns(); - tssi_info->tssi_alimk_time += finish_time - start_time; + this_time = ktime_us_delta(ktime_get(), start_time); + tssi_info->tssi_alimk_time += this_time;
rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[TSSI PA K] %s processing time = %d ms\n", __func__, - tssi_info->tssi_alimk_time); + "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n", + __func__, this_time, tssi_info->tssi_alimk_time); }
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c index 336a83e1d46b..6e6889eea9a0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c @@ -3663,9 +3663,10 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); struct rtw8852bx_bb_tssi_bak tssi_bak; s32 aliment_diff, tssi_cw_default; - u32 start_time, finish_time; u32 bb_reg_backup[8] = {}; + ktime_t start_time; const s16 *power; + s64 this_time; u8 band; bool ok; u32 tmp; @@ -3675,7 +3676,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, "======> %s channel=%d path=%d\n", __func__, channel, path);
- start_time = ktime_get_ns(); + start_time = ktime_get();
if (chan->band_type == RTW89_BAND_2G) power = power_2g; @@ -3802,12 +3803,12 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
- finish_time = ktime_get_ns(); - tssi_info->tssi_alimk_time += finish_time - start_time; + this_time = ktime_us_delta(ktime_get(), start_time); + tssi_info->tssi_alimk_time += this_time;
rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[TSSI PA K] %s processing time = %d ms\n", __func__, - tssi_info->tssi_alimk_time); + "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n", + __func__, this_time, tssi_info->tssi_alimk_time); }
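Both rtw8852b and rtw8852bt above switch the calibration timing from a raw ktime_get_ns() difference stored in a u32 to ktime_get()/ktime_us_delta() accumulated in a 64-bit counter. A minimal sketch of that measurement pattern, with a hypothetical calibration callback and accumulator, might look like this:

#include <linux/ktime.h>
#include <linux/types.h>

static u64 total_cal_us;	/* hypothetical accumulated calibration time, in us */

static void run_calibration_timed(void (*calibrate)(void))
{
	ktime_t start = ktime_get();	/* monotonic start timestamp */
	s64 this_us;

	calibrate();

	/*
	 * ktime_us_delta() yields a signed 64-bit microsecond delta, so the
	 * accumulator no longer wraps the way a u32 nanosecond count could.
	 */
	this_us = ktime_us_delta(ktime_get(), start);
	total_cal_us += this_us;
}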
void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c index ffcfc3e02c35..a5aa96a31f4a 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c @@ -215,6 +215,9 @@ static int gen3_init_ntb(struct intel_ntb_dev *ndev) }
 	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+	/* Make sure we are not using DB's used for link status */
+	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
+		ndev->db_valid_mask &= ~ndev->db_link_mask;
ndev->reg->db_iowrite(ndev->db_valid_mask, ndev->self_mmio + diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index ad1786be2554..f851397b65d6 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -288,7 +288,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, if (size != 0 && xlate_pos < 12) return -EINVAL;
- if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { + if (xlate_pos >= 0 && !IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { /* * In certain circumstances we can get a buffer that is * not aligned to its size. (Most of the time diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 72bc1d017a46..dfd175f79e8f 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -839,10 +839,8 @@ static int perf_copy_chunk(struct perf_thread *pthr, dma_set_unmap(tx, unmap);
 	ret = dma_submit_error(dmaengine_submit(tx));
-	if (ret) {
-		dmaengine_unmap_put(unmap);
+	if (ret)
 		goto err_free_resource;
-	}
dmaengine_unmap_put(unmap);
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c index a002ea6fdd84..ee478ccde7c6 100644 --- a/drivers/nvdimm/badrange.c +++ b/drivers/nvdimm/badrange.c @@ -167,7 +167,7 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num) dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n", (u64) s * 512, (u64) num * 512); /* this isn't an error as the hardware will still throw an exception */ - if (badblocks_set(bb, s, num, 1)) + if (!badblocks_set(bb, s, num, 1)) dev_info_once(bb->dev, "%s: failed for sector %llx\n", __func__, (u64) s); } diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 5ca06e9a2d29..cc5c8f3f81e8 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -673,7 +673,7 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, { if (bb->count) { sector_t first_bad; - int num_bad; + sector_t num_bad;
return !!badblocks_check(bb, sector, len / 512, &first_bad, &num_bad); diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index cfdfe0eaa512..8f3e816e805d 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -367,9 +367,10 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn) struct nd_namespace_common *ndns = nd_pfn->ndns; void *zero_page = page_address(ZERO_PAGE(0)); struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; - int num_bad, meta_num, rc, bb_present; + int meta_num, rc, bb_present; sector_t first_bad, meta_start; struct nd_namespace_io *nsio; + sector_t num_bad;
if (nd_pfn->mode != PFN_MODE_PMEM) return 0; @@ -394,7 +395,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn) bb_present = badblocks_check(&nd_region->bb, meta_start, meta_num, &first_bad, &num_bad); if (bb_present) { - dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n", + dev_dbg(&nd_pfn->dev, "meta: %llx badblocks at %llx\n", num_bad, first_bad); nsoff = ALIGN_DOWN((nd_region->ndr_start + (first_bad << 9)) - nsio->res.start, @@ -413,7 +414,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn) } if (rc) { dev_err(&nd_pfn->dev, - "error clearing %x badblocks at %llx\n", + "error clearing %llx badblocks at %llx\n", num_bad, first_bad); return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index d81faa9d89c9..43156e1576c9 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -249,7 +249,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT; struct badblocks *bb = &pmem->bb; sector_t first_bad; - int num_bad; + sector_t num_bad;
if (kaddr) *kaddr = pmem->virt_addr + offset; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 24e2c702da7a..fed6b29098ad 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -141,7 +141,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, struct iov_iter iter;
/* fixedbufs is only for non-vectored io */ - if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) { + if (flags & NVME_IOCTL_VEC) { ret = -EINVAL; goto out; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 3ad7f197c808..1dc12784efaf 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -989,6 +989,9 @@ static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist) { struct request *req;
+	if (rq_list_empty(rqlist))
+		return;
+
 	spin_lock(&nvmeq->sq_lock);
 	while ((req = rq_list_pop(rqlist))) {
 		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
index 220c7391fc19..c6571fbd35e3 100644
--- a/drivers/nvme/target/debugfs.c
+++ b/drivers/nvme/target/debugfs.c
@@ -78,7 +78,7 @@ static int nvmet_ctrl_state_show(struct seq_file *m, void *p)
 	bool sep = false;
 	int i;
 
-	for (i = 0; i < 7; i++) {
+	for (i = 0; i < ARRAY_SIZE(csts_state_names); i++) {
 		int state = BIT(i);
 
 		if (!(ctrl->csts & state))
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index b1e31483f157..99563648c318 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -2129,8 +2129,15 @@ static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
 		return -ENODEV;
 	}
- if (epc_features->bar[BAR_0].only_64bit) - epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; + /* + * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims + * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type + * is required to be 64-bit. Thus, for interoperability, always set the + * type to 64-bit. In the rare case that the PCI EPC does not support + * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail, + * and we will return failure back to the user. + */ + epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
/* * Calculate the size of the register bar: NVMe registers first with diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index e0cc4560dfde..0bf4cde34f51 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -352,8 +352,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, spin_unlock_irqrestore(&ep->lock, flags);
offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | - CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | - CDNS_PCIE_MSG_NO_DATA; + CDNS_PCIE_NORMAL_MSG_CODE(msg_code); writel(0, ep->irq_cpu_addr + offset); }
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index f5eeff834ec1..39ee9945c903 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -246,7 +246,7 @@ struct cdns_pcie_rp_ib_bar { #define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) #define CDNS_PCIE_NORMAL_MSG_CODE(code) \ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) -#define CDNS_PCIE_MSG_NO_DATA BIT(16) +#define CDNS_PCIE_MSG_DATA BIT(16)
struct cdns_pcie;
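The two Cadence hunks above stop OR-ing bit 16 into the offset used for the no-data INTx message, and the header rename (CDNS_PCIE_MSG_NO_DATA -> CDNS_PCIE_MSG_DATA) records that the bit actually marks a message carrying data. As a rough standalone sketch of the shift-and-mask encoding involved — the BIT()/GENMASK() stand-ins and the example message code are illustrative, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)        (1u << (n))
#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

#define MSG_CODE_MASK  GENMASK(15, 8)
#define MSG_CODE(code) (((code) << 8) & MSG_CODE_MASK)
#define MSG_DATA       BIT(16)        /* set only for messages that carry data */

int main(void)
{
        uint32_t msg_code = 0x20;            /* illustrative message code */
        uint32_t offset = MSG_CODE(msg_code); /* data flag left clear: no payload */

        printf("no-data message bits: 0x%08x\n", offset);
        printf("with data flag:       0x%08x\n", offset | MSG_DATA);
        return 0;
}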
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 8e07d432e74f..e41479a9ca02 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -773,6 +773,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) if (ret) return ret;
+ ret = -ENOMEM; if (!ep->ib_window_map) { ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, GFP_KERNEL); diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 615a0e3e6d7e..1f2f4c28a949 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev) ret = histb_pcie_host_enable(pp); if (ret) { dev_err(dev, "failed to enable host\n"); - return ret; + goto err_exit_phy; }
ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); - return ret; + goto err_exit_phy; }
return 0; + +err_exit_phy: + phy_exit(hipcie->phy); + + return ret; }
static void histb_pcie_remove(struct platform_device *pdev) @@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev)
histb_pcie_host_disable(hipcie);
- if (hipcie->phy) - phy_exit(hipcie->phy); + phy_exit(hipcie->phy); }
static const struct of_device_id histb_pcie_of_match[] = { diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e733a27dc8df..3d7dbfcd689e 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -403,10 +403,10 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie) static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) { u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); - u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen; - writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
lnkctl2 = (lnkctl2 & ~0xf) | gen; writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); @@ -1276,6 +1276,10 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) bool ssc_good = false; int ret, i;
+ /* Limit the generation if specified */ + if (pcie->gen) + brcm_pcie_set_gen(pcie, pcie->gen); + /* Unassert the fundamental reset */ ret = pcie->perst_set(pcie, 0); if (ret) @@ -1302,9 +1306,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
brcm_config_clkreq(pcie);
- if (pcie->gen) - brcm_pcie_set_gen(pcie, pcie->gen); - if (pcie->ssc) { ret = brcm_pcie_set_ssc(pcie); if (ret == 0) @@ -1367,7 +1368,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)
ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); if (ret) { - dev_info(dev, "No regulators for downstream device\n"); + dev_info(dev, "Did not get regulators, err=%d\n", ret); + pcie->sr = NULL; goto no_regulators; }
@@ -1390,7 +1392,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus) struct subdev_regulators *sr = pcie->sr; struct device *dev = &bus->dev;
- if (!sr) + if (!sr || !bus->parent || !pci_is_root_bus(bus->parent)) return;
if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index aa24ac9aaecc..d0cc7f3b4b52 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -15,6 +15,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_device.h> @@ -24,6 +25,7 @@ #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/regmap.h> #include <linux/reset.h>
#include "../pci.h" @@ -930,9 +932,13 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie) { + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); struct device *dev = pcie->dev; + struct resource_entry *entry; + struct regmap *pbus_regmap; + u32 val, args[2], size; + resource_size_t addr; int err; - u32 val;
/* * The controller may have been left out of reset by the bootloader @@ -945,6 +951,26 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie) /* Wait for the time needed to complete the reset lines assert. */ msleep(PCIE_EN7581_RESET_TIME_MS);
+ /* + * Configure PBus base address and base address mask to allow the + * hw to detect if a given address is accessible on PCIe controller. + */ + pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node, + "mediatek,pbus-csr", + ARRAY_SIZE(args), + args); + if (IS_ERR(pbus_regmap)) + return PTR_ERR(pbus_regmap); + + entry = resource_list_first_type(&host->windows, IORESOURCE_MEM); + if (!entry) + return -ENODEV; + + addr = entry->res->start - entry->offset; + regmap_write(pbus_regmap, args[0], lower_32_bits(addr)); + size = lower_32_bits(resource_size(entry->res)); + regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size))); + /* * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581 * requires PHY initialization and power-on before PHY reset deassert. diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index 81e8bfae53d0..dc8ecdbee56c 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -583,15 +583,17 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) return err;
bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); - if (!bus) - return -ENODEV; + if (!bus) { + err = -ENODEV; + goto err_free_irq_domains; + }
port->variant = of_device_get_match_data(dev);
err = xilinx_cpm_pcie_parse_dt(port, bus->res); if (err) { dev_err(dev, "Parsing DT failed\n"); - goto err_parse_dt; + goto err_free_irq_domains; }
xilinx_cpm_pcie_init_port(port); @@ -615,7 +617,7 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) xilinx_cpm_free_interrupts(port); err_setup_irq: pci_ecam_free(port->cfg); -err_parse_dt: +err_free_irq_domains: xilinx_cpm_free_irq_domains(port); return err; } diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index b94e205ae10b..2409787cf56d 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -66,17 +66,17 @@ struct pci_epf_test { };
struct pci_epf_test_reg { - u32 magic; - u32 command; - u32 status; - u64 src_addr; - u64 dst_addr; - u32 size; - u32 checksum; - u32 irq_type; - u32 irq_number; - u32 flags; - u32 caps; + __le32 magic; + __le32 command; + __le32 status; + __le64 src_addr; + __le64 dst_addr; + __le32 size; + __le32 checksum; + __le32 irq_type; + __le32 irq_number; + __le32 flags; + __le32 caps; } __packed;
static struct pci_epf_header test_header = { @@ -324,13 +324,17 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test, struct pci_epc *epc = epf->epc; struct device *dev = &epf->dev; struct pci_epc_map src_map, dst_map; - u64 src_addr = reg->src_addr; - u64 dst_addr = reg->dst_addr; - size_t copy_size = reg->size; + u64 src_addr = le64_to_cpu(reg->src_addr); + u64 dst_addr = le64_to_cpu(reg->dst_addr); + size_t orig_size, copy_size; ssize_t map_size = 0; + u32 flags = le32_to_cpu(reg->flags); + u32 status = 0; void *copy_buf = NULL, *buf;
- if (reg->flags & FLAG_USE_DMA) { + orig_size = copy_size = le32_to_cpu(reg->size); + + if (flags & FLAG_USE_DMA) { if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) { dev_err(dev, "DMA controller doesn't support MEMCPY\n"); ret = -EINVAL; @@ -350,7 +354,7 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test, src_addr, copy_size, &src_map); if (ret) { dev_err(dev, "Failed to map source address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; + status = STATUS_SRC_ADDR_INVALID; goto free_buf; }
@@ -358,7 +362,7 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test, dst_addr, copy_size, &dst_map); if (ret) { dev_err(dev, "Failed to map destination address\n"); - reg->status = STATUS_DST_ADDR_INVALID; + status = STATUS_DST_ADDR_INVALID; pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map); goto free_buf; @@ -367,7 +371,7 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test, map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);
ktime_get_ts64(&start); - if (reg->flags & FLAG_USE_DMA) { + if (flags & FLAG_USE_DMA) { ret = pci_epf_test_data_transfer(epf_test, dst_map.phys_addr, src_map.phys_addr, map_size, 0, DMA_MEM_TO_MEM); @@ -391,8 +395,8 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test, map_size = 0; }
- pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, - &end, reg->flags & FLAG_USE_DMA); + pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end, + flags & FLAG_USE_DMA);
unmap: if (map_size) { @@ -405,9 +409,10 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
set_status: if (!ret) - reg->status |= STATUS_COPY_SUCCESS; + status |= STATUS_COPY_SUCCESS; else - reg->status |= STATUS_COPY_FAIL; + status |= STATUS_COPY_FAIL; + reg->status = cpu_to_le32(status); }
static void pci_epf_test_read(struct pci_epf_test *epf_test, @@ -423,9 +428,14 @@ static void pci_epf_test_read(struct pci_epf_test *epf_test, struct pci_epc *epc = epf->epc; struct device *dev = &epf->dev; struct device *dma_dev = epf->epc->dev.parent; - u64 src_addr = reg->src_addr; - size_t src_size = reg->size; + u64 src_addr = le64_to_cpu(reg->src_addr); + size_t orig_size, src_size; ssize_t map_size = 0; + u32 flags = le32_to_cpu(reg->flags); + u32 checksum = le32_to_cpu(reg->checksum); + u32 status = 0; + + orig_size = src_size = le32_to_cpu(reg->size);
src_buf = kzalloc(src_size, GFP_KERNEL); if (!src_buf) { @@ -439,12 +449,12 @@ static void pci_epf_test_read(struct pci_epf_test *epf_test, src_addr, src_size, &map); if (ret) { dev_err(dev, "Failed to map address\n"); - reg->status = STATUS_SRC_ADDR_INVALID; + status = STATUS_SRC_ADDR_INVALID; goto free_buf; }
map_size = map.pci_size; - if (reg->flags & FLAG_USE_DMA) { + if (flags & FLAG_USE_DMA) { dst_phys_addr = dma_map_single(dma_dev, buf, map_size, DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dst_phys_addr)) { @@ -481,11 +491,11 @@ static void pci_epf_test_read(struct pci_epf_test *epf_test, map_size = 0; }
- pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, - &end, reg->flags & FLAG_USE_DMA); + pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end, + flags & FLAG_USE_DMA);
- crc32 = crc32_le(~0, src_buf, reg->size); - if (crc32 != reg->checksum) + crc32 = crc32_le(~0, src_buf, orig_size); + if (crc32 != checksum) ret = -EIO;
unmap: @@ -497,9 +507,10 @@ static void pci_epf_test_read(struct pci_epf_test *epf_test,
set_status: if (!ret) - reg->status |= STATUS_READ_SUCCESS; + status |= STATUS_READ_SUCCESS; else - reg->status |= STATUS_READ_FAIL; + status |= STATUS_READ_FAIL; + reg->status = cpu_to_le32(status); }
static void pci_epf_test_write(struct pci_epf_test *epf_test, @@ -514,9 +525,13 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test, struct pci_epc *epc = epf->epc; struct device *dev = &epf->dev; struct device *dma_dev = epf->epc->dev.parent; - u64 dst_addr = reg->dst_addr; - size_t dst_size = reg->size; + u64 dst_addr = le64_to_cpu(reg->dst_addr); + size_t orig_size, dst_size; ssize_t map_size = 0; + u32 flags = le32_to_cpu(reg->flags); + u32 status = 0; + + orig_size = dst_size = le32_to_cpu(reg->size);
dst_buf = kzalloc(dst_size, GFP_KERNEL); if (!dst_buf) { @@ -524,7 +539,7 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test, goto set_status; } get_random_bytes(dst_buf, dst_size); - reg->checksum = crc32_le(~0, dst_buf, dst_size); + reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size)); buf = dst_buf;
while (dst_size) { @@ -532,12 +547,12 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test, dst_addr, dst_size, &map); if (ret) { dev_err(dev, "Failed to map address\n"); - reg->status = STATUS_DST_ADDR_INVALID; + status = STATUS_DST_ADDR_INVALID; goto free_buf; }
map_size = map.pci_size; - if (reg->flags & FLAG_USE_DMA) { + if (flags & FLAG_USE_DMA) { src_phys_addr = dma_map_single(dma_dev, buf, map_size, DMA_TO_DEVICE); if (dma_mapping_error(dma_dev, src_phys_addr)) { @@ -576,8 +591,8 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test, map_size = 0; }
- pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, - &end, reg->flags & FLAG_USE_DMA); + pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end, + flags & FLAG_USE_DMA);
/* * wait 1ms inorder for the write to complete. Without this delay L3 @@ -594,9 +609,10 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test,
set_status: if (!ret) - reg->status |= STATUS_WRITE_SUCCESS; + status |= STATUS_WRITE_SUCCESS; else - reg->status |= STATUS_WRITE_FAIL; + status |= STATUS_WRITE_FAIL; + reg->status = cpu_to_le32(status); }
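The pci-epf-test hunks above declare the BAR-resident register block as __le32/__le64 and convert with le32_to_cpu()/cpu_to_le32() at each access, so the test behaves the same on big-endian endpoints. A minimal userspace sketch of the same convention, using hand-rolled byte-order helpers and an invented two-field layout rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled little-endian helpers standing in for le32_to_cpu()/cpu_to_le32(). */
static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
}

/* Shared register block kept in wire (little-endian) order; layout invented. */
struct test_reg {
        uint8_t size[4];
        uint8_t status[4];
};

int main(void)
{
        struct test_reg reg;
        uint32_t status = 0;

        put_le32(reg.size, 4096);              /* "host" side writes LE */
        uint32_t size = get_le32(reg.size);    /* convert once, work in CPU order */
        if (size)
                status |= 0x1;                 /* illustrative SUCCESS-style flag */
        put_le32(reg.status, status);          /* convert back only when storing */

        printf("size=%u status=0x%x\n", size, get_le32(reg.status));
        return 0;
}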
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, @@ -605,39 +621,42 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, struct pci_epf *epf = epf_test->epf; struct device *dev = &epf->dev; struct pci_epc *epc = epf->epc; - u32 status = reg->status | STATUS_IRQ_RAISED; + u32 status = le32_to_cpu(reg->status); + u32 irq_number = le32_to_cpu(reg->irq_number); + u32 irq_type = le32_to_cpu(reg->irq_type); int count;
/* * Set the status before raising the IRQ to ensure that the host sees * the updated value when it gets the IRQ. */ - WRITE_ONCE(reg->status, status); + status |= STATUS_IRQ_RAISED; + WRITE_ONCE(reg->status, cpu_to_le32(status));
- switch (reg->irq_type) { + switch (irq_type) { case IRQ_TYPE_INTX: pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_INTX, 0); break; case IRQ_TYPE_MSI: count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no); - if (reg->irq_number > count || count <= 0) { + if (irq_number > count || count <= 0) { dev_err(dev, "Invalid MSI IRQ number %d / %d\n", - reg->irq_number, count); + irq_number, count); return; } pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, - PCI_IRQ_MSI, reg->irq_number); + PCI_IRQ_MSI, irq_number); break; case IRQ_TYPE_MSIX: count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no); - if (reg->irq_number > count || count <= 0) { + if (irq_number > count || count <= 0) { dev_err(dev, "Invalid MSIX IRQ number %d / %d\n", - reg->irq_number, count); + irq_number, count); return; } pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, - PCI_IRQ_MSIX, reg->irq_number); + PCI_IRQ_MSIX, irq_number); break; default: dev_err(dev, "Failed to raise IRQ, unknown type\n"); @@ -654,21 +673,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) struct device *dev = &epf->dev; enum pci_barno test_reg_bar = epf_test->test_reg_bar; struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; + u32 irq_type = le32_to_cpu(reg->irq_type);
- command = READ_ONCE(reg->command); + command = le32_to_cpu(READ_ONCE(reg->command)); if (!command) goto reset_handler;
WRITE_ONCE(reg->command, 0); WRITE_ONCE(reg->status, 0);
- if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) && + if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) && !epf_test->dma_supported) { dev_err(dev, "Cannot transfer data using DMA\n"); goto reset_handler; }
- if (reg->irq_type > IRQ_TYPE_MSIX) { + if (irq_type > IRQ_TYPE_MSIX) { dev_err(dev, "Failed to detect IRQ type\n"); goto reset_handler; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index bb5a8d9f03ad..28ab393af1c0 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -842,7 +842,9 @@ void pcie_enable_interrupt(struct controller *ctrl) { u16 mask;
- mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; + mask = PCI_EXP_SLTCTL_DLLSCE; + if (!pciehp_poll_mode) + mask |= PCI_EXP_SLTCTL_HPIE; pcie_write_cmd(ctrl, mask, mask); }
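With pciehp running in polling mode, the hunk above now leaves the Hot-Plug Interrupt Enable bit clear and only keeps Data Link Layer State Changed Enable set. A trivial sketch of building that Slot Control mask; the bit values below are placeholders for the real PCI_EXP_SLTCTL_* definitions in pci_regs.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLTCTL_HPIE   0x0020   /* Hot-Plug Interrupt Enable (illustrative value) */
#define SLTCTL_DLLSCE 0x1000   /* Link State Changed Enable (illustrative value) */

static uint16_t slot_irq_mask(bool poll_mode)
{
        uint16_t mask = SLTCTL_DLLSCE;

        /* In polling mode the port interrupt stays off; events are polled. */
        if (!poll_mode)
                mask |= SLTCTL_HPIE;
        return mask;
}

int main(void)
{
        printf("irq mode:  0x%04x\n", slot_irq_mask(false));
        printf("poll mode: 0x%04x\n", slot_irq_mask(true));
        return 0;
}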
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 9e4770cdd4d5..a964c66b4295 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -285,23 +285,16 @@ const struct attribute_group sriov_vf_dev_attr_group = { .is_visible = sriov_vf_attrs_are_visible, };
-int pci_iov_add_virtfn(struct pci_dev *dev, int id) +static struct pci_dev *pci_iov_scan_device(struct pci_dev *dev, int id, + struct pci_bus *bus) { - int i; - int rc = -ENOMEM; - u64 size; - struct pci_dev *virtfn; - struct resource *res; struct pci_sriov *iov = dev->sriov; - struct pci_bus *bus; - - bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id)); - if (!bus) - goto failed; + struct pci_dev *virtfn; + int rc;
virtfn = pci_alloc_dev(bus); if (!virtfn) - goto failed0; + return ERR_PTR(-ENOMEM);
virtfn->devfn = pci_iov_virtfn_devfn(dev, id); virtfn->vendor = dev->vendor; @@ -314,8 +307,35 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) pci_read_vf_config_common(virtfn);
rc = pci_setup_device(virtfn); - if (rc) - goto failed1; + if (rc) { + pci_dev_put(dev); + pci_bus_put(virtfn->bus); + kfree(virtfn); + return ERR_PTR(rc); + } + + return virtfn; +} + +int pci_iov_add_virtfn(struct pci_dev *dev, int id) +{ + struct pci_bus *bus; + struct pci_dev *virtfn; + struct resource *res; + int rc, i; + u64 size; + + bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id)); + if (!bus) { + rc = -ENOMEM; + goto failed; + } + + virtfn = pci_iov_scan_device(dev, id, bus); + if (IS_ERR(virtfn)) { + rc = PTR_ERR(virtfn); + goto failed0; + }
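The helper introduced above reports failure through an error pointer rather than a bare NULL, letting the caller recover the errno with PTR_ERR(). A self-contained sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention with local stand-ins for the <linux/err.h> helpers; the virtfn struct and scan function are invented for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)     { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct virtfn { int id; };

/* Returns either a valid object or an encoded -errno, never NULL. */
static struct virtfn *scan_virtfn(int id)
{
        if (id < 0)
                return ERR_PTR(-EINVAL);

        struct virtfn *vf = malloc(sizeof(*vf));
        if (!vf)
                return ERR_PTR(-ENOMEM);
        vf->id = id;
        return vf;
}

int main(void)
{
        struct virtfn *vf = scan_virtfn(3);

        if (IS_ERR(vf)) {
                fprintf(stderr, "scan failed: %ld\n", PTR_ERR(vf));
                return 1;
        }
        printf("virtfn %d added\n", vf->id);
        free(vf);
        return 0;
}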
virtfn->dev.parent = dev->dev.parent; virtfn->multifunction = 0; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index b46ce1a2c554..0e7eb2a42d88 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1556,7 +1556,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n, return -EINVAL;
device_lock(dev); - if (dev->driver) { + if (dev->driver || pci_num_vf(pdev)) { ret = -EBUSY; goto unlock; } @@ -1578,7 +1578,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
pci_remove_resource_files(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) { + for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { if (pci_resource_len(pdev, i) && pci_resource_flags(pdev, i) == flags) pci_release_resource(pdev, i); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 869d204a70a3..3e78cf86ef03 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -954,8 +954,10 @@ struct pci_acs { };
static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, - const char *p, u16 mask, u16 flags) + const char *p, const u16 acs_mask, const u16 acs_flags) { + u16 flags = acs_flags; + u16 mask = acs_mask; char *delimit; int ret = 0;
@@ -963,7 +965,7 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, return;
while (*p) { - if (!mask) { + if (!acs_mask) { /* Check for ACS flags */ delimit = strstr(p, "@"); if (delimit) { @@ -971,6 +973,8 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, u32 shift = 0;
end = delimit - p - 1; + mask = 0; + flags = 0;
while (end > -1) { if (*(p + end) == '0') { @@ -1027,10 +1031,14 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
pci_dbg(dev, "ACS mask = %#06x\n", mask); pci_dbg(dev, "ACS flags = %#06x\n", flags); + pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl); + pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);
- /* If mask is 0 then we copy the bit from the firmware setting. */ - caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask); - caps->ctrl |= flags; + /* + * For mask bits that are 0, copy them from the firmware setting + * and apply flags for all the mask bits that are 1. + */ + caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);
pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl); } @@ -5405,6 +5413,8 @@ static bool pci_bus_resettable(struct pci_bus *bus) return false;
list_for_each_entry(dev, &bus->devices, bus_list) { + if (!pci_reset_supported(dev)) + return false; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; @@ -5481,6 +5491,8 @@ static bool pci_slot_resettable(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; + if (!pci_reset_supported(dev)) + return false; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index da3e7edcf49d..29fcb0689a91 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1270,16 +1270,16 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) parent_link = link->parent;
/* - * link->downstream is a pointer to the pci_dev of function 0. If - * we remove that function, the pci_dev is about to be deallocated, - * so we can't use link->downstream again. Free the link state to - * avoid this. + * Free the parent link state, no later than function 0 (i.e. + * link->downstream) being removed. * - * If we're removing a non-0 function, it's possible we could - * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends - * programming the same ASPM Control value for all functions of - * multi-function devices, so disable ASPM for all of them. + * Do not free the link state any earlier. If function 0 is a + * switch upstream port, this link state is parent_link to all + * subordinate ones. */ + if (pdev != link->downstream) + goto out; + pcie_config_aspm_link(link, 0); list_del(&link->sibling); free_link_state(link); @@ -1290,6 +1290,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) pcie_config_aspm_path(parent_link); }
+ out: mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); } diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c index 0a5e7efbce2c..d8d2aa85a229 100644 --- a/drivers/pci/pcie/bwctrl.c +++ b/drivers/pci/pcie/bwctrl.c @@ -113,7 +113,7 @@ static u16 pcie_bwctrl_select_speed(struct pci_dev *port, enum pci_bus_speed spe up_read(&pci_bus_sem); } if (!supported_speeds) - return PCI_EXP_LNKCAP2_SLS_2_5GB; + supported_speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
return pcie_supported_speeds2target_speed(supported_speeds & desired_speeds); } @@ -294,6 +294,10 @@ static int pcie_bwnotif_probe(struct pcie_device *srv) struct pci_dev *port = srv->port; int ret;
+ /* Can happen if we run out of bus numbers during enumeration. */ + if (!port->subordinate) + return -ENODEV; + struct pcie_bwctrl_data *data = devm_kzalloc(&srv->device, sizeof(*data), GFP_KERNEL); if (!data) diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index 02e73099bad0..e8318fd5f6ed 100644 --- a/drivers/pci/pcie/portdrv.c +++ b/drivers/pci/pcie/portdrv.c @@ -228,10 +228,12 @@ static int get_port_device_capability(struct pci_dev *dev)
/* * Disable hot-plug interrupts in case they have been enabled - * by the BIOS and the hot-plug service driver is not loaded. + * by the BIOS and the hot-plug service driver won't be loaded + * to handle them. */ - pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); + if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); }
#ifdef CONFIG_PCIEAER diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 246744d8d268..0154b48bfbd7 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -996,10 +996,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) /* Temporarily move resources off the list */ list_splice_init(&bridge->windows, &resources); err = device_add(&bridge->dev); - if (err) { - put_device(&bridge->dev); + if (err) goto free; - } + bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 5e00cecf1f1a..8707c5b08cf3 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -927,9 +927,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
size0 = calculate_iosize(size, min_size, size1, 0, 0, resource_size(b_res), min_align); - size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 : - calculate_iosize(size, min_size, size1, add_size, children_add_size, - resource_size(b_res), min_align); + + size1 = size0; + if (realloc_head && (add_size > 0 || children_add_size > 0)) { + size1 = calculate_iosize(size, min_size, size1, add_size, + children_add_size, resource_size(b_res), + min_align); + } + if (!size0 && !size1) { if (bus->self && (b_res->start || b_res->end)) pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n", @@ -1058,7 +1063,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, struct list_head *realloc_head) { struct pci_dev *dev; - resource_size_t min_align, win_align, align, size, size0, size1; + resource_size_t min_align, win_align, align, size, size0, size1 = 0; resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */ int order, max_order; struct resource *b_res = find_bus_resource_of_type(bus, @@ -1141,7 +1146,6 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, min_align = calculate_mem_align(aligns, max_order); min_align = max(min_align, win_align); size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align); - add_align = max(min_align, add_align);
if (bus->self && size0 && !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type, @@ -1149,14 +1153,28 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, min_align = 1ULL << (max_order + __ffs(SZ_1M)); min_align = max(min_align, win_align); size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align); - add_align = win_align; pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n", b_res, &bus->busn_res); }
- size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 : - calculate_memsize(size, min_size, add_size, children_add_size, - resource_size(b_res), add_align); + if (realloc_head && (add_size > 0 || children_add_size > 0)) { + add_align = max(min_align, add_align); + size1 = calculate_memsize(size, min_size, add_size, children_add_size, + resource_size(b_res), add_align); + + if (bus->self && size1 && + !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type, + size1, add_align)) { + min_align = 1ULL << (max_order + __ffs(SZ_1M)); + min_align = max(min_align, win_align); + size1 = calculate_memsize(size, min_size, add_size, children_add_size, + resource_size(b_res), win_align); + pci_info(bus->self, + "bridge window %pR to %pR requires relaxed alignment rules\n", + b_res, &bus->busn_res); + } + } + if (!size0 && !size1) { if (bus->self && (b_res->start || b_res->end)) pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n", @@ -2102,8 +2120,7 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus, * in case of root bus. */ if (bridge && pci_bridge_resources_not_assigned(dev)) - pci_bridge_distribute_available_resources(bridge, - add_list); + pci_bridge_distribute_available_resources(dev, add_list); else pci_root_bus_distribute_available_resources(b, add_list); } diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index 0965b9d4f9cf..2fb4f297fda3 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -263,11 +263,22 @@ enum rk_hdptx_reset { RST_MAX };
+#define MAX_HDPTX_PHY_NUM 2 + +struct rk_hdptx_phy_cfg { + unsigned int num_phys; + unsigned int phy_ids[MAX_HDPTX_PHY_NUM]; +}; + struct rk_hdptx_phy { struct device *dev; struct regmap *regmap; struct regmap *grf;
+ /* PHY const config */ + const struct rk_hdptx_phy_cfg *cfgs; + int phy_id; + struct phy *phy; struct phy_config *phy_cfg; struct clk_bulk_data *clks; @@ -1007,15 +1018,14 @@ static int rk_hdptx_phy_clk_register(struct rk_hdptx_phy *hdptx) struct device *dev = hdptx->dev; const char *name, *pname; struct clk *refclk; - int ret, id; + int ret;
refclk = devm_clk_get(dev, "ref"); if (IS_ERR(refclk)) return dev_err_probe(dev, PTR_ERR(refclk), "Failed to get ref clock\n");
- id = of_alias_get_id(dev->of_node, "hdptxphy"); - name = id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0"; + name = hdptx->phy_id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0"; pname = __clk_get_name(refclk);
hdptx->hw.init = CLK_HW_INIT(name, pname, &hdptx_phy_clk_ops, @@ -1058,8 +1068,9 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) struct phy_provider *phy_provider; struct device *dev = &pdev->dev; struct rk_hdptx_phy *hdptx; + struct resource *res; void __iomem *regs; - int ret; + int ret, id;
hdptx = devm_kzalloc(dev, sizeof(*hdptx), GFP_KERNEL); if (!hdptx) @@ -1067,11 +1078,27 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
hdptx->dev = dev;
- regs = devm_platform_ioremap_resource(pdev, 0); + regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(regs)) return dev_err_probe(dev, PTR_ERR(regs), "Failed to ioremap resource\n");
+ hdptx->cfgs = device_get_match_data(dev); + if (!hdptx->cfgs) + return dev_err_probe(dev, -EINVAL, "missing match data\n"); + + /* find the phy-id from the io address */ + hdptx->phy_id = -ENODEV; + for (id = 0; id < hdptx->cfgs->num_phys; id++) { + if (res->start == hdptx->cfgs->phy_ids[id]) { + hdptx->phy_id = id; + break; + } + } + + if (hdptx->phy_id < 0) + return dev_err_probe(dev, -ENODEV, "no matching device found\n"); + ret = devm_clk_bulk_get_all(dev, &hdptx->clks); if (ret < 0) return dev_err_probe(dev, ret, "Failed to get clocks\n"); @@ -1132,8 +1159,19 @@ static const struct dev_pm_ops rk_hdptx_phy_pm_ops = { rk_hdptx_phy_runtime_resume, NULL) };
+static const struct rk_hdptx_phy_cfg rk3588_hdptx_phy_cfgs = { + .num_phys = 2, + .phy_ids = { + 0xfed60000, + 0xfed70000, + }, +}; + static const struct of_device_id rk_hdptx_phy_of_match[] = { - { .compatible = "rockchip,rk3588-hdptx-phy", }, + { + .compatible = "rockchip,rk3588-hdptx-phy", + .data = &rk3588_hdptx_phy_cfgs + }, {} }; MODULE_DEVICE_TABLE(of, rk_hdptx_phy_of_match); diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index cc1fe0555e19..eaeec096bc9a 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -346,14 +346,14 @@ static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offse struct bcm2835_pinctrl *pc = gpiochip_get_data(chip); enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
- /* Alternative function doesn't clearly provide a direction */ - if (fsel > BCM2835_FSEL_GPIO_OUT) - return -EINVAL; - - if (fsel == BCM2835_FSEL_GPIO_IN) - return GPIO_LINE_DIRECTION_IN; + if (fsel == BCM2835_FSEL_GPIO_OUT) + return GPIO_LINE_DIRECTION_OUT;
- return GPIO_LINE_DIRECTION_OUT; + /* + * Alternative function doesn't clearly provide a direction. Default + * to INPUT. + */ + return GPIO_LINE_DIRECTION_IN; }
static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 527e4b87ae52..f8b0221055e4 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1543,7 +1543,6 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl, .clk_rate = 19200000, .npwm = 1, .base_unit_bits = 22, - .bypass = true, }; struct pwm_chip *chip;
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c index d09a5e9b2eca..f6a1e684a386 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c @@ -1290,12 +1290,14 @@ static struct npcm8xx_func npcm8xx_funcs[] = { };
#define NPCM8XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) \ - [a] { .fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \ + [a] = { \ + .flag = q, \ + .fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \ .fn1 = fn_ ## e, .reg1 = NPCM8XX_GCR_ ## f, .bit1 = g, \ .fn2 = fn_ ## h, .reg2 = NPCM8XX_GCR_ ## i, .bit2 = j, \ .fn3 = fn_ ## k, .reg3 = NPCM8XX_GCR_ ## l, .bit3 = m, \ .fn4 = fn_ ## n, .reg4 = NPCM8XX_GCR_ ## o, .bit4 = p, \ - .flag = q } + }
/* Drive strength controlled by NPCM8XX_GP_N_ODSC */ #define DRIVE_STRENGTH_LO_SHIFT 8 @@ -2361,8 +2363,8 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl) return dev_err_probe(dev, ret, "gpio-ranges fail for GPIO bank %u\n", id);
ret = fwnode_irq_get(child, 0); - if (!ret) - return dev_err_probe(dev, ret, "No IRQ for GPIO bank %u\n", id); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to retrieve IRQ for bank %u\n", id);
pctrl->gpio_bank[id].irq = ret; pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip; diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c index dd1f8c29d3e7..8b36161c7c50 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza2.c +++ b/drivers/pinctrl/renesas/pinctrl-rza2.c @@ -256,6 +256,8 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv) return ret; }
+ of_node_put(of_args.np); + if ((of_args.args[0] != 0) || (of_args.args[1] != 0) || (of_args.args[2] != priv->npins)) { diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index ce4a07a3df49..d1da7f53fc60 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -2756,6 +2756,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) if (ret) return dev_err_probe(pctrl->dev, ret, "Unable to parse gpio-ranges\n");
+ of_node_put(of_args.np); + if (of_args.args[0] != 0 || of_args.args[1] != 0 || of_args.args[2] != pctrl->data->n_port_pins) return dev_err_probe(pctrl->dev, -EINVAL, @@ -3386,6 +3388,7 @@ static struct platform_driver rzg2l_pinctrl_driver = { .name = DRV_NAME, .of_match_table = of_match_ptr(rzg2l_pinctrl_of_table), .pm = pm_sleep_ptr(&rzg2l_pinctrl_pm_ops), + .suppress_bind_attrs = true, }, .probe = rzg2l_pinctrl_probe, }; diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c index 4062c56619f5..8c7169db4fcc 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c +++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c @@ -940,6 +940,8 @@ static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl) return ret; }
+ of_node_put(of_args.np); + if (of_args.args[0] != 0 || of_args.args[1] != 0 || of_args.args[2] != pctrl->data->n_port_pins) { dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n"); diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index c83e5a65e680..3b046450bd3f 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -270,6 +270,9 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, val = pmx_readl(pmx, g->mux_bank, g->mux_reg); val &= ~(0x3 << g->mux_bit); val |= i << g->mux_bit; + /* Set the SFIO/GPIO selection to SFIO when under pinmux control*/ + if (pmx->soc->sfsel_in_mux) + val |= (1 << g->sfsel_bit); pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
return 0; diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c index 50002ef13d5d..8f868f845350 100644 --- a/drivers/platform/x86/dell/dell-uart-backlight.c +++ b/drivers/platform/x86/dell/dell-uart-backlight.c @@ -325,7 +325,7 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev) return PTR_ERR_OR_ZERO(dell_bl->bl); }
-struct serdev_device_driver dell_uart_bl_serdev_driver = { +static struct serdev_device_driver dell_uart_bl_serdev_driver = { .probe = dell_uart_bl_serdev_probe, .driver = { .name = KBUILD_MODNAME, diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c index e75cd6e1efe6..ab5f7d3ab824 100644 --- a/drivers/platform/x86/dell/dell-wmi-ddv.c +++ b/drivers/platform/x86/dell/dell-wmi-ddv.c @@ -665,8 +665,10 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char if (ret < 0) return ret;
- /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */ - return sysfs_emit(buf, "%d\n", value - 2731); + /* Use 2732 instead of 2731.5 to avoid unnecessary rounding and to emulate + * the behaviour of the OEM application which seems to round down the result. + */ + return sysfs_emit(buf, "%d\n", value - 2732); }
static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index dbcd3087aaa4..31239a93dd71 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -84,7 +84,7 @@ static DECLARE_HASHTABLE(isst_hash, 8); static DEFINE_MUTEX(isst_hash_lock);
static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param, - u32 data) + u64 data) { struct isst_cmd *sst_cmd;
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c index a96b215cd2c5..25933cd018d1 100644 --- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c +++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c @@ -219,7 +219,7 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev) return 0; }
-struct serdev_device_driver yt2_1380_fc_serdev_driver = { +static struct serdev_device_driver yt2_1380_fc_serdev_driver = { .probe = yt2_1380_fc_serdev_probe, .driver = { .name = KBUILD_MODNAME, diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 1cc91173e012..2ff38ca9ddb4 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8797,6 +8797,7 @@ static const struct attribute_group fan_driver_attr_group = { #define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */ #define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */ #define TPACPI_FAN_TPR 0x0040 /* Fan speed is in Ticks Per Revolution */ +#define TPACPI_FAN_NOACPI 0x0080 /* Don't use ACPI methods even if detected */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1), @@ -8827,6 +8828,9 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */ TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR), /* ThinkPad x120e */ + TPACPI_Q_LNV3('R', '0', '0', TPACPI_FAN_NOACPI),/* E560 */ + TPACPI_Q_LNV3('R', '1', '2', TPACPI_FAN_NOACPI),/* T495 */ + TPACPI_Q_LNV3('R', '1', '3', TPACPI_FAN_NOACPI),/* T495s */ };
static int __init fan_init(struct ibm_init_struct *iibm) @@ -8878,6 +8882,13 @@ static int __init fan_init(struct ibm_init_struct *iibm) tp_features.fan_ctrl_status_undef = 1; }
+ if (quirks & TPACPI_FAN_NOACPI) { + /* E560, T495, T495s */ + pr_info("Ignoring buggy ACPI fan access method\n"); + fang_handle = NULL; + fanw_handle = NULL; + } + if (gfan_handle) { /* 570, 600e/x, 770e, 770x */ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 90a5bccfc6b9..a8d8bcaace2f 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1918,7 +1918,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) cache.flags = -1; /* read error */ if (cache.flags >= 0) { cache.capacity = bq27xxx_battery_read_soc(di); - di->cache.flags = cache.flags;
/* * On gauges with signed current reporting the current must be diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c index cdea35c0d1de..027d6a539b65 100644 --- a/drivers/power/supply/max77693_charger.c +++ b/drivers/power/supply/max77693_charger.c @@ -608,7 +608,7 @@ static int max77693_set_charge_input_threshold_volt(struct max77693_charger *chg case 4700000: case 4800000: case 4900000: - data = (uvolt - 4700000) / 100000; + data = ((uvolt - 4700000) / 100000) + 1; break; default: dev_err(chg->dev, "Wrong value for charge input voltage regulation threshold\n"); diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index b651087f426f..4a87af0980d6 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -2090,6 +2090,10 @@ ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen, { struct ptp_ocp_signal s = { };
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + s.polarity = bp->signal[gen].polarity; s.period = ktime_set(req->period.sec, req->period.nsec); if (!s.period) diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index faa6b79c27d7..dfe1dd93d56f 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -460,7 +460,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), .vsel_reg = PCA9450_REG_LDO5CTRL_H, .vsel_mask = LDO5HOUT_MASK, - .enable_reg = PCA9450_REG_LDO5CTRL_H, + .enable_reg = PCA9450_REG_LDO5CTRL_L, .enable_mask = LDO5H_EN_MASK, .owner = THIS_MODULE, }, @@ -674,7 +674,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), .vsel_reg = PCA9450_REG_LDO5CTRL_H, .vsel_mask = LDO5HOUT_MASK, - .enable_reg = PCA9450_REG_LDO5CTRL_H, + .enable_reg = PCA9450_REG_LDO5CTRL_L, .enable_mask = LDO5H_EN_MASK, .owner = THIS_MODULE, }, @@ -864,7 +864,7 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = { .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), .vsel_reg = PCA9450_REG_LDO5CTRL_H, .vsel_mask = LDO5HOUT_MASK, - .enable_reg = PCA9450_REG_LDO5CTRL_H, + .enable_reg = PCA9450_REG_LDO5CTRL_L, .enable_mask = LDO5H_EN_MASK, .owner = THIS_MODULE, }, diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index e78bd986dc3f..2c80d7fe39f8 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1831,6 +1831,13 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs, while (pd_names[num_pds]) num_pds++;
+ /* Handle single power domain */ + if (num_pds == 1 && dev->pm_domain) { + devs[0] = dev; + pm_runtime_enable(dev); + return 1; + } + for (i = 0; i < num_pds; i++) { devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) { @@ -1851,8 +1858,15 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs, static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, size_t pd_count) { + struct device *dev = qproc->dev; int i;
+ /* Handle single power domain */ + if (pd_count == 1 && dev->pm_domain) { + pm_runtime_disable(dev); + return; + } + for (i = 0; i < pd_count; i++) dev_pm_domain_detach(pds[i], false); } @@ -2449,13 +2463,13 @@ static const struct rproc_hexagon_res msm8974_mss = { .supply = "pll", .uA = 100000, }, - {} - }, - .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { { .supply = "mx", .uV = 1050000, }, + {} + }, + .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { { .supply = "cx", .uA = 100000, @@ -2481,7 +2495,6 @@ static const struct rproc_hexagon_res msm8974_mss = { NULL }, .proxy_pd_names = (char*[]){ - "mx", "cx", NULL }, diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 97c4bdd9222a..60923ed12904 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -501,16 +501,16 @@ static int adsp_pds_attach(struct device *dev, struct device **devs, if (!pd_names) return 0;
+ while (pd_names[num_pds]) + num_pds++; + /* Handle single power domain */ - if (dev->pm_domain) { + if (num_pds == 1 && dev->pm_domain) { devs[0] = dev; pm_runtime_enable(dev); return 1; }
- while (pd_names[num_pds]) - num_pds++; - for (i = 0; i < num_pds; i++) { devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) { @@ -535,7 +535,7 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds, int i;
/* Handle single power domain */ - if (dev->pm_domain && pd_count) { + if (pd_count == 1 && dev->pm_domain) { pm_runtime_disable(dev); return; } @@ -1348,6 +1348,7 @@ static const struct adsp_data sc7280_wpss_resource = { .crash_reason_smem = 626, .firmware_name = "wpss.mdt", .pas_id = 6, + .minidump_id = 4, .auto_boot = false, .proxy_pd_names = (char*[]){ "cx", @@ -1410,7 +1411,7 @@ static const struct adsp_data sm8650_mpss_resource = { };
static const struct of_device_id adsp_of_match[] = { - { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init}, + { .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource}, { .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource}, { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init}, { .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource}, diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index c2cf0d277729..b21eedefff87 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -2025,6 +2025,7 @@ int rproc_shutdown(struct rproc *rproc) kfree(rproc->cached_table); rproc->cached_table = NULL; rproc->table_ptr = NULL; + rproc->table_sz = 0; out: mutex_unlock(&rproc->lock); return ret; diff --git a/drivers/rtc/rtc-renesas-rtca3.c b/drivers/rtc/rtc-renesas-rtca3.c index a056291d3887..ab816bdf0d77 100644 --- a/drivers/rtc/rtc-renesas-rtca3.c +++ b/drivers/rtc/rtc-renesas-rtca3.c @@ -586,17 +586,14 @@ static int rtca3_initial_setup(struct clk *clk, struct rtca3_priv *priv) */ usleep_range(sleep_us, sleep_us + 10);
- /* Disable all interrupts. */ - mask = RTCA3_RCR1_AIE | RTCA3_RCR1_CIE | RTCA3_RCR1_PIE; - ret = rtca3_alarm_irq_set_helper(priv, mask, 0); - if (ret) - return ret; - mask = RTCA3_RCR2_START | RTCA3_RCR2_HR24; val = readb(priv->base + RTCA3_RCR2); - /* Nothing to do if already started in 24 hours and calendar count mode. */ - if ((val & mask) == mask) - return 0; + /* Only disable the interrupts if already started in 24 hours and calendar count mode. */ + if ((val & mask) == mask) { + /* Disable all interrupts. */ + mask = RTCA3_RCR1_AIE | RTCA3_RCR1_CIE | RTCA3_RCR1_PIE; + return rtca3_alarm_irq_set_helper(priv, mask, 0); + }
/* Reconfigure the RTC in 24 hours and calendar count mode. */ mask = RTCA3_RCR2_START | RTCA3_RCR2_CNTMD; diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 2d438d722d0b..e17f5d8226bf 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -633,8 +633,7 @@ extern struct dentry *hisi_sas_debugfs_dir; extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); extern int hisi_sas_alloc(struct hisi_hba *hisi_hba); extern void hisi_sas_free(struct hisi_hba *hisi_hba); -extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, - int direction); +extern u8 hisi_sas_get_ata_protocol(struct sas_task *task); extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_slot *slot); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index da4a2ed8ee86..3596414d970b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -21,8 +21,32 @@ struct hisi_sas_internal_abort_data { bool rst_ha_timeout; /* reset the HA for timeout */ };
-u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) +static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc) { + if (!qc) + return HISI_SAS_SATA_PROTOCOL_PIO; + + switch (qc->tf.protocol) { + case ATA_PROT_NODATA: + return HISI_SAS_SATA_PROTOCOL_NONDATA; + case ATA_PROT_PIO: + return HISI_SAS_SATA_PROTOCOL_PIO; + case ATA_PROT_DMA: + return HISI_SAS_SATA_PROTOCOL_DMA; + case ATA_PROT_NCQ_NODATA: + case ATA_PROT_NCQ: + return HISI_SAS_SATA_PROTOCOL_FPDMA; + default: + return HISI_SAS_SATA_PROTOCOL_PIO; + } +} + +u8 hisi_sas_get_ata_protocol(struct sas_task *task) +{ + struct host_to_dev_fis *fis = &task->ata_task.fis; + struct ata_queued_cmd *qc = task->uldd_task; + int direction = task->data_dir; + switch (fis->command) { case ATA_CMD_FPDMA_WRITE: case ATA_CMD_FPDMA_READ: @@ -93,7 +117,7 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) { if (direction == DMA_NONE) return HISI_SAS_SATA_PROTOCOL_NONDATA; - return HISI_SAS_SATA_PROTOCOL_PIO; + return hisi_sas_get_ata_protocol_from_tf(qc); } } } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 71cd5b4450c2..6e7f99fcc824 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2538,9 +2538,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, (task->ata_task.fis.control & ATA_SRST)) dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol( - &task->ata_task.fis, task->data_dir)) - << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; hdr->dw1 = cpu_to_le32(dw1);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 48b95d9a7927..095bbf80c34e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -1456,9 +1456,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, (task->ata_task.fis.control & ATA_SRST)) dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol( - &task->ata_task.fis, task->data_dir)) - << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 7589f48aebc8..f4b5813e6fc4 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -2339,6 +2339,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) }
if (!mrioc->ioctl_sges_allocated) { + mutex_unlock(&mrioc->bsg_cmds.mutex); dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n", __func__); return -ENOMEM; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index dc43cfa83088..212e3b86bb81 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -8018,7 +8018,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->hostdiag_unlock_mutex); if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic)) - goto out; + goto unlock;
hcb_size = ioc->base_readl(&ioc->chip->HCBSize); drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n")); @@ -8038,7 +8038,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "Invalid host diagnostic register value\n"); _base_dump_reg_set(ioc); - goto out; + goto unlock; } if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) break; @@ -8074,17 +8074,19 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", __func__, ioc_state); _base_dump_reg_set(ioc); - goto out; + goto fail; }
pci_cfg_access_unlock(ioc->pdev); ioc_info(ioc, "diag reset: SUCCESS\n"); return 0;
- out: +unlock: + mutex_unlock(&ioc->hostdiag_unlock_mutex); + +fail: pci_cfg_access_unlock(ioc->pdev); ioc_err(ioc, "diag reset: FAILED\n"); - mutex_unlock(&ioc->hostdiag_unlock_mutex); return -EFAULT; }
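The mpt3sas change above splits the old catch-all label into "unlock" and "fail" so the host-diagnostic mutex is released on every path that took it, and only on those paths. A small sketch of that two-label unwind pattern, using a hypothetical reset helper:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t diag_lock = PTHREAD_MUTEX_INITIALIZER;

/* Errors hit while the lock is held jump to "unlock"; later errors jump
 * straight to "fail", which "unlock" also falls through into. */
static int diag_reset(int unlock_ok, int ready_ok)
{
        int ret = -1;

        pthread_mutex_lock(&diag_lock);

        if (!unlock_ok)
                goto unlock;

        pthread_mutex_unlock(&diag_lock);

        if (!ready_ok)
                goto fail;

        printf("diag reset: SUCCESS\n");
        return 0;

unlock:
        pthread_mutex_unlock(&diag_lock);
fail:
        printf("diag reset: FAILED\n");
        return ret;
}

int main(void)
{
        diag_reset(1, 1);
        diag_reset(0, 1);
        diag_reset(1, 0);
        return 0;
}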
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index a456e5ec74d8..9c2d3178f384 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2703,7 +2703,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) ssp_target = 1; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SEP) { - sdev_printk(KERN_WARNING, sdev, + sdev_printk(KERN_INFO, sdev, "set ignore_delay_remove for handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); sas_device_priv_data->ignore_delay_remove = 1; diff --git a/drivers/soc/mediatek/mt8167-mmsys.h b/drivers/soc/mediatek/mt8167-mmsys.h index f7a35b3656bb..655ef962abe9 100644 --- a/drivers/soc/mediatek/mt8167-mmsys.h +++ b/drivers/soc/mediatek/mt8167-mmsys.h @@ -17,18 +17,23 @@ static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = { { DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0, MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0, + OVL0_MOUT_EN_COLOR0 }, { DDP_COMPONENT_DITHER0, DDP_COMPONENT_RDMA0, - MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0 + MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0, + MT8167_DITHER_MOUT_EN_RDMA0 }, { DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0, - MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0 + MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0, + COLOR0_SEL_IN_OVL0 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0, - MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0 + MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0, + MT8167_DSI0_SEL_IN_RDMA0 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0, - MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0 + MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0, + MT8167_RDMA0_SOUT_DSI0 }, };
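The mt8167 routing entries above gain an explicit mask next to the value, so the mmsys core can update only the bits a route owns instead of overwriting the whole register. A toy sketch of applying such a {reg, mask, val} table with read-modify-write; the struct and offsets are invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct route {
        unsigned int reg;   /* register index in the fake register file */
        uint32_t mask;      /* bits owned by this route */
        uint32_t val;       /* value to set within the mask */
};

static uint32_t regs[4];

static void apply_route(const struct route *r)
{
        /* Read-modify-write: bits outside the mask are preserved. */
        regs[r->reg] = (regs[r->reg] & ~r->mask) | (r->val & r->mask);
}

int main(void)
{
        const struct route table[] = {
                { .reg = 1, .mask = 0xf, .val = 0x1 },  /* e.g. OVL0 -> COLOR0 */
                { .reg = 2, .mask = 0xf, .val = 0x2 },
        };

        regs[1] = 0xabcd0000;   /* pre-existing bits outside the mask survive */

        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                apply_route(&table[i]);

        printf("reg1 = 0x%08x, reg2 = 0x%08x\n", regs[1], regs[2]);
        return 0;
}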
diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h index 6bebf1a69fc0..a1d63be0a73d 100644 --- a/drivers/soc/mediatek/mt8188-mmsys.h +++ b/drivers/soc/mediatek/mt8188-mmsys.h @@ -343,7 +343,7 @@ static const struct mtk_mmsys_routes mmsys_mt8188_vdo1_routing_table[] = { MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT }, { DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1, - MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0), + MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0), MT8188_MERGE4_SOUT_TO_DPI1_SEL }, { DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1, diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h index 7abaf048d91e..ae37945e6c67 100644 --- a/drivers/soc/mediatek/mt8365-mmsys.h +++ b/drivers/soc/mediatek/mt8365-mmsys.h @@ -14,8 +14,9 @@ #define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8 #define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
+#define MT8365_DISP_MS_IN_OUT_MASK GENMASK(3, 0) #define MT8365_RDMA0_SOUT_COLOR0 0x1 -#define MT8365_DITHER_MOUT_EN_DSI0 0x1 +#define MT8365_DITHER_MOUT_EN_DSI0 BIT(0) #define MT8365_DSI0_SEL_IN_DITHER 0x1 #define MT8365_RDMA0_SEL_IN_OVL0 0x0 #define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0 @@ -30,52 +31,43 @@ static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = { { DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, - MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL - }, - { + MT8365_DISP_MS_IN_OUT_MASK, MT8365_OVL0_MOUT_PATH0_SEL + }, { DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN, - MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0 - }, - { + MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SEL_IN_OVL0 + }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL, - MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0 - }, - { + MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SOUT_COLOR0 + }, { DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR, MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, - MT8365_DISP_COLOR_SEL_IN_COLOR0,MT8365_DISP_COLOR_SEL_IN_COLOR0 - }, - { + MT8365_DISP_MS_IN_OUT_MASK, MT8365_DISP_COLOR_SEL_IN_COLOR0 + }, { DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0, MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN, - MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0 - }, - { + MT8365_DISP_MS_IN_OUT_MASK, MT8365_DITHER_MOUT_EN_DSI0 + }, { DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0, MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, - MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER - }, - { + MT8365_DISP_MS_IN_OUT_MASK, MT8365_DSI0_SEL_IN_DITHER + }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN, - MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 - }, - { + MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 + }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK - }, - { + }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN, - MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1 - }, - { + MT8365_DISP_MS_IN_OUT_MASK, MT8365_DPI0_SEL_IN_RDMA1 + }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL, - MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0 + MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA1_SOUT_DPI0 }, };
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c index 59965f43c2fb..f78a2a16581a 100644 --- a/drivers/soundwire/generic_bandwidth_allocation.c +++ b/drivers/soundwire/generic_bandwidth_allocation.c @@ -194,10 +194,11 @@ static int sdw_compute_group_params(struct sdw_bus *bus, continue; } else { /* - * Include runtimes with running (ENABLED state) and paused (DISABLED state) - * streams + * Include runtimes with running (ENABLED/PREPARED state) and + * paused (DISABLED state) streams */ if (m_rt->stream->state != SDW_STREAM_ENABLED && + m_rt->stream->state != SDW_STREAM_PREPARED && m_rt->stream->state != SDW_STREAM_DISABLED) continue; } diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index 4869b073b11c..d2d99555ec5a 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -13,6 +13,7 @@ static void sdw_slave_release(struct device *dev) { struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ of_node_put(slave->dev.of_node); mutex_destroy(&slave->sdw_dev_lock); kfree(slave); } diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index c85997478b81..17fc0b17e756 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -302,7 +302,7 @@ static void amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz) { unsigned int i, spd7_val, alt_spd;
- for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++) + for (i = 0; i < ARRAY_SIZE(amd_spi_freq)-1; i++) if (speed_hz >= amd_spi_freq[i].speed_hz) break;
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 0d1aa6592484..77de5a07639a 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1162,7 +1162,8 @@ static void bcm2835_spi_cleanup(struct spi_device *spi) sizeof(u32), DMA_TO_DEVICE);
- gpiod_put(bs->cs_gpio); + if (!IS_ERR(bs->cs_gpio)) + gpiod_put(bs->cs_gpio); spi_set_csgpiod(spi, 0, NULL);
kfree(target); @@ -1225,7 +1226,12 @@ static int bcm2835_spi_setup(struct spi_device *spi) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); struct bcm2835_spidev *target = spi_get_ctldata(spi); struct gpiod_lookup_table *lookup __free(kfree) = NULL; - int ret; + const char *pinctrl_compats[] = { + "brcm,bcm2835-gpio", + "brcm,bcm2711-gpio", + "brcm,bcm7211-gpio", + }; + int ret, i; u32 cs;
if (!target) { @@ -1290,6 +1296,14 @@ static int bcm2835_spi_setup(struct spi_device *spi) goto err_cleanup; }
+ for (i = 0; i < ARRAY_SIZE(pinctrl_compats); i++) { + if (of_find_compatible_node(NULL, NULL, pinctrl_compats[i])) + break; + } + + if (i == ARRAY_SIZE(pinctrl_compats)) + return 0; + /* * TODO: The code below is a slightly better alternative to the utter * abuse of the GPIO API that I found here before. It creates a diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c index aed98ab14334..6dcba0e0ddaa 100644 --- a/drivers/spi/spi-cadence-xspi.c +++ b/drivers/spi/spi-cadence-xspi.c @@ -432,7 +432,7 @@ static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi, u32 clk_reg; bool update_clk = false;
- while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list)) { + while (i < (ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list) - 1)) { clk_val = MRVL_XSPI_CLOCK_DIVIDED( cdns_mrvl_xspi_clk_div_list[i]); if (clk_val <= requested_clk) diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c index 3f4f95b7fe34..c62407077d37 100644 --- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c +++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c @@ -848,6 +848,7 @@ static gpib_interface_t agilent_82350b_unaccel_interface = { .primary_address = agilent_82350b_primary_address, .secondary_address = agilent_82350b_secondary_address, .serial_poll_response = agilent_82350b_serial_poll_response, + .serial_poll_status = agilent_82350b_serial_poll_status, .t1_delay = agilent_82350b_t1_delay, .return_to_local = agilent_82350b_return_to_local, }; @@ -875,6 +876,7 @@ static gpib_interface_t agilent_82350b_interface = { .primary_address = agilent_82350b_primary_address, .secondary_address = agilent_82350b_secondary_address, .serial_poll_response = agilent_82350b_serial_poll_response, + .serial_poll_status = agilent_82350b_serial_poll_status, .t1_delay = agilent_82350b_t1_delay, .return_to_local = agilent_82350b_return_to_local, }; diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c index 69f0e490d401..e0d36f0dff25 100644 --- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c +++ b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c @@ -7,6 +7,10 @@
#define _GNU_SOURCE
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define dev_fmt pr_fmt +#define DRV_NAME KBUILD_MODNAME + #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -79,14 +83,12 @@ static int agilent_82357a_send_bulk_msg(struct agilent_82357a_priv *a_priv, void
retval = usb_submit_urb(a_priv->bulk_urb, GFP_KERNEL); if (retval) { - dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "failed to submit bulk out urb, retval=%i\n", retval); mutex_unlock(&a_priv->bulk_alloc_lock); goto cleanup; } mutex_unlock(&a_priv->bulk_alloc_lock); if (down_interruptible(&context->complete)) { - dev_err(&usb_dev->dev, "%s: interrupted\n", __func__); retval = -ERESTARTSYS; goto cleanup; } @@ -149,14 +151,12 @@ static int agilent_82357a_receive_bulk_msg(struct agilent_82357a_priv *a_priv, v
retval = usb_submit_urb(a_priv->bulk_urb, GFP_KERNEL); if (retval) { - dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "failed to submit bulk in urb, retval=%i\n", retval); mutex_unlock(&a_priv->bulk_alloc_lock); goto cleanup; } mutex_unlock(&a_priv->bulk_alloc_lock); if (down_interruptible(&context->complete)) { - dev_err(&usb_dev->dev, "%s: interrupted\n", __func__); retval = -ERESTARTSYS; goto cleanup; } @@ -205,7 +205,6 @@ static int agilent_82357a_receive_control_msg(struct agilent_82357a_priv *a_priv
static void agilent_82357a_dump_raw_block(const u8 *raw_data, int length) { - pr_info("hex block dump\n"); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 8, 1, raw_data, length, true); }
@@ -225,7 +224,7 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv, static const int max_writes = 31;
if (num_writes > max_writes) { - dev_err(&usb_dev->dev, "%s: bug! num_writes=%i too large\n", __func__, num_writes); + dev_err(&usb_dev->dev, "bug! num_writes=%i too large\n", num_writes); return -EIO; } out_data_length = num_writes * bytes_per_write + header_length; @@ -239,8 +238,7 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv, out_data[i++] = writes[j].address; out_data[i++] = writes[j].value; } - if (i > out_data_length) - dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__); + retval = mutex_lock_interruptible(&a_priv->bulk_transfer_lock); if (retval) { kfree(out_data); @@ -249,8 +247,8 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv, retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, 1000); kfree(out_data); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); mutex_unlock(&a_priv->bulk_transfer_lock); return retval; } @@ -265,20 +263,19 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv, mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); agilent_82357a_dump_raw_block(in_data, bytes_read); kfree(in_data); return -EIO; } if (in_data[0] != (0xff & ~DATA_PIPE_CMD_WR_REGS)) { - dev_err(&usb_dev->dev, "%s: error, bulk command=0x%x != ~DATA_PIPE_CMD_WR_REGS\n", - __func__, in_data[0]); + dev_err(&usb_dev->dev, "bulk command=0x%x != ~DATA_PIPE_CMD_WR_REGS\n", in_data[0]); return -EIO; } if (in_data[1]) { - dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x in DATA_PIPE_CMD_WR_REGS response\n", - __func__, in_data[1]); + dev_err(&usb_dev->dev, "nonzero error code 0x%x in DATA_PIPE_CMD_WR_REGS response\n", + in_data[1]); return -EIO; } kfree(in_data); @@ -299,9 +296,10 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv, static const int header_length = 2; static const int max_reads = 62;
- if (num_reads > max_reads) - dev_err(&usb_dev->dev, "%s: bug! num_reads=%i too large\n", __func__, num_reads); - + if (num_reads > max_reads) { + dev_err(&usb_dev->dev, "bug! num_reads=%i too large\n", num_reads); + return -EIO; + } out_data_length = num_reads + header_length; out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) @@ -311,8 +309,7 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv, out_data[i++] = num_reads; for (j = 0; j < num_reads; j++) out_data[i++] = reads[j].address; - if (i > out_data_length) - dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__); + if (blocking) { retval = mutex_lock_interruptible(&a_priv->bulk_transfer_lock); if (retval) { @@ -329,8 +326,8 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv, retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, 1000); kfree(out_data); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); mutex_unlock(&a_priv->bulk_transfer_lock); return retval; } @@ -345,21 +342,20 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv, mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); agilent_82357a_dump_raw_block(in_data, bytes_read); kfree(in_data); return -EIO; } i = 0; if (in_data[i++] != (0xff & ~DATA_PIPE_CMD_RD_REGS)) { - dev_err(&usb_dev->dev, "%s: error, bulk command=0x%x != ~DATA_PIPE_CMD_RD_REGS\n", - __func__, in_data[0]); + dev_err(&usb_dev->dev, "bulk command=0x%x != ~DATA_PIPE_CMD_RD_REGS\n", in_data[0]); return -EIO; } if (in_data[i++]) { - dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x in DATA_PIPE_CMD_RD_REGS response\n", - __func__, in_data[1]); + dev_err(&usb_dev->dev, "nonzero error code 0x%x in DATA_PIPE_CMD_RD_REGS response\n", + in_data[1]); return -EIO; } for (j = 0; j < num_reads; j++) @@ -390,14 +386,13 @@ static int agilent_82357a_abort(struct agilent_82357a_priv *a_priv, int flush) wIndex, status_data, status_data_len, 100); if (receive_control_retval < 0) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_control_msg() returned %i\n", - __func__, receive_control_retval); + dev_err(&usb_dev->dev, "82357a_receive_control_msg() returned %i\n", + receive_control_retval); retval = -EIO; goto cleanup; } if (status_data[0] != (~XFER_ABORT & 0xff)) { - dev_err(&usb_dev->dev, "%s: error, major code=0x%x != ~XFER_ABORT\n", - __func__, status_data[0]); + dev_err(&usb_dev->dev, "major code=0x%x != ~XFER_ABORT\n", status_data[0]); retval = -EIO; goto cleanup; } @@ -413,8 +408,7 @@ static int agilent_82357a_abort(struct agilent_82357a_priv *a_priv, int flush) fallthrough; case UGP_ERR_FLUSHING_ALREADY: default: - dev_err(&usb_dev->dev, "%s: abort returned error code=0x%x\n", - __func__, status_data[1]); + dev_err(&usb_dev->dev, "abort returned error code=0x%x\n", status_data[1]); retval = -EIO; break; } @@ -433,7 +427,7 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng { int retval; struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; int out_data_length, in_data_length; int bytes_written, bytes_read; @@ -444,6 +438,10 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
*nbytes = 0; *end = 0; + + if (!a_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(a_priv->bus_interface); out_data_length = 0x9; out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) @@ -469,8 +467,8 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, msec_timeout); kfree(out_data); if (retval || bytes_written != i) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); mutex_unlock(&a_priv->bulk_transfer_lock); if (retval < 0) return retval; @@ -501,19 +499,19 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng &extra_bytes_read, 100); bytes_read += extra_bytes_read; if (extra_bytes_retval) { - dev_err(&usb_dev->dev, "%s: extra_bytes_retval=%i, bytes_read=%i\n", - __func__, extra_bytes_retval, bytes_read); + dev_err(&usb_dev->dev, "extra_bytes_retval=%i, bytes_read=%i\n", + extra_bytes_retval, bytes_read); agilent_82357a_abort(a_priv, 0); } } else if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); agilent_82357a_abort(a_priv, 0); } mutex_unlock(&a_priv->bulk_transfer_lock); if (bytes_read > length + 1) { bytes_read = length + 1; - pr_warn("%s: bytes_read > length? truncating", __func__); + dev_warn(&usb_dev->dev, "bytes_read > length? truncating"); }
if (bytes_read >= 1) { @@ -540,7 +538,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer { int retval; struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data = NULL; u8 *status_data = NULL; int out_data_length; @@ -551,6 +549,10 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer struct agilent_82357a_register_pairlet read_reg;
*bytes_written = 0; + if (!a_priv->bus_interface) + return -ENODEV; + + usb_dev = interface_to_usbdev(a_priv->bus_interface); out_data_length = length + 0x8; out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) @@ -584,8 +586,8 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer kfree(out_data); if (retval || raw_bytes_written != i) { agilent_82357a_abort(a_priv, 0); - dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, raw_bytes_written=%i, i=%i\n", - __func__, retval, raw_bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, raw_bytes_written=%i, i=%i\n", + retval, raw_bytes_written, i); mutex_unlock(&a_priv->bulk_transfer_lock); if (retval < 0) return retval; @@ -597,7 +599,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer &a_priv->interrupt_flags) || test_bit(TIMO_NUM, &board->status)); if (retval) { - dev_err(&usb_dev->dev, "%s: wait write complete interrupted\n", __func__); + dev_dbg(&usb_dev->dev, "wait write complete interrupted\n"); agilent_82357a_abort(a_priv, 0); mutex_unlock(&a_priv->bulk_transfer_lock); return -ERESTARTSYS; @@ -614,8 +616,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer read_reg.address = BSR; retval = agilent_82357a_read_registers(a_priv, &read_reg, 1, 1); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "read_registers() returned error\n"); return -ETIMEDOUT; }
@@ -632,8 +633,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer read_reg.address = ADSR; retval = agilent_82357a_read_registers(a_priv, &read_reg, 1, 1); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "read_registers() returned error\n"); return -ETIMEDOUT; } adsr = read_reg.value; @@ -659,8 +659,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer 100); mutex_unlock(&a_priv->bulk_transfer_lock); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_control_msg() returned %i\n", - __func__, retval); + dev_err(&usb_dev->dev, "receive_control_msg() returned %i\n", retval); kfree(status_data); return -EIO; } @@ -699,17 +698,20 @@ int agilent_82357a_take_control_internal(gpib_board_t *board, int synchronous) write.value = AUX_TCA; retval = agilent_82357a_write_registers(a_priv, &write, 1); if (retval) - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval; }
static int agilent_82357a_take_control(gpib_board_t *board, int synchronous) { + struct agilent_82357a_priv *a_priv = board->private_data; const int timeout = 10; int i;
+ if (!a_priv->bus_interface) + return -ENODEV; + /* It looks like the 9914 does not handle tcs properly. * See comment above tms9914_take_control_workaround() in * drivers/gpib/tms9914/tms9914_aux.c @@ -733,16 +735,19 @@ static int agilent_82357a_take_control(gpib_board_t *board, int synchronous) static int agilent_82357a_go_to_standby(gpib_board_t *board) { struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; struct agilent_82357a_register_pairlet write; int retval;
+ if (!a_priv->bus_interface) + return -ENODEV; + + usb_dev = interface_to_usbdev(a_priv->bus_interface); write.address = AUXCR; write.value = AUX_GTS; retval = agilent_82357a_write_registers(a_priv, &write, 1); if (retval) - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return 0; }
@@ -750,11 +755,15 @@ static int agilent_82357a_go_to_standby(gpib_board_t *board) static void agilent_82357a_request_system_control(gpib_board_t *board, int request_control) { struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; struct agilent_82357a_register_pairlet writes[2]; int retval; int i = 0;
+ if (!a_priv->bus_interface) + return; // -ENODEV; + + usb_dev = interface_to_usbdev(a_priv->bus_interface); /* 82357B needs bit to be set in 9914 AUXCR register */ writes[i].address = AUXCR; if (request_control) { @@ -771,18 +780,21 @@ static void agilent_82357a_request_system_control(gpib_board_t *board, int reque ++i; retval = agilent_82357a_write_registers(a_priv, writes, i); if (retval) - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return;// retval; }
static void agilent_82357a_interface_clear(gpib_board_t *board, int assert) { struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; struct agilent_82357a_register_pairlet write; int retval;
+ if (!a_priv->bus_interface) + return; // -ENODEV; + + usb_dev = interface_to_usbdev(a_priv->bus_interface); write.address = AUXCR; write.value = AUX_SIC; if (assert) { @@ -791,25 +803,27 @@ static void agilent_82357a_interface_clear(gpib_board_t *board, int assert) } retval = agilent_82357a_write_registers(a_priv, &write, 1); if (retval) - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); }
static void agilent_82357a_remote_enable(gpib_board_t *board, int enable) { struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; struct agilent_82357a_register_pairlet write; int retval;
+ if (!a_priv->bus_interface) + return; //-ENODEV; + + usb_dev = interface_to_usbdev(a_priv->bus_interface); write.address = AUXCR; write.value = AUX_SRE; if (enable) write.value |= AUX_CS; retval = agilent_82357a_write_registers(a_priv, &write, 1); if (retval) - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); a_priv->ren_state = enable; return;// 0; } @@ -818,10 +832,11 @@ static int agilent_82357a_enable_eos(gpib_board_t *board, uint8_t eos_byte, int { struct agilent_82357a_priv *a_priv = board->private_data;
- if (compare_8_bits == 0) { - pr_warn("%s: hardware only supports 8-bit EOS compare", __func__); + if (!a_priv->bus_interface) + return -ENODEV; + if (compare_8_bits == 0) return -EOPNOTSUPP; - } + a_priv->eos_char = eos_byte; a_priv->eos_mode = REOS | BIN; return 0; @@ -837,10 +852,13 @@ static void agilent_82357a_disable_eos(gpib_board_t *board) static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned int clear_mask) { struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; struct agilent_82357a_register_pairlet address_status, bus_status; int retval;
+ if (!a_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(a_priv->bus_interface); board->status &= ~clear_mask; if (a_priv->is_cic) set_bit(CIC_NUM, &board->status); @@ -850,8 +868,7 @@ static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned i retval = agilent_82357a_read_registers(a_priv, &address_status, 1, 0); if (retval) { if (retval != -EAGAIN) - dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "read_registers() returned error\n"); return board->status; } // check for remote/local @@ -883,8 +900,7 @@ static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned i retval = agilent_82357a_read_registers(a_priv, &bus_status, 1, 0); if (retval) { if (retval != -EAGAIN) - dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "read_registers() returned error\n"); return board->status; } if (bus_status.value & BSR_SRQ_BIT) @@ -902,13 +918,15 @@ static int agilent_82357a_primary_address(gpib_board_t *board, unsigned int addr struct agilent_82357a_register_pairlet write; int retval;
+ if (!a_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(a_priv->bus_interface); // put primary address in address0 write.address = ADR; write.value = address & ADDRESS_MASK; retval = agilent_82357a_write_registers(a_priv, &write, 1); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return retval; } return retval; @@ -917,18 +935,21 @@ static int agilent_82357a_primary_address(gpib_board_t *board, unsigned int addr static int agilent_82357a_secondary_address(gpib_board_t *board, unsigned int address, int enable) { if (enable) - pr_warn("%s: warning: assigning a secondary address not supported\n", __func__); - return -EOPNOTSUPP; + return -EOPNOTSUPP; + return 0; }
static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result) { struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; struct agilent_82357a_register_pairlet writes[2]; struct agilent_82357a_register_pairlet read; int retval;
+ if (!a_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(a_priv->bus_interface); // execute parallel poll writes[0].address = AUXCR; writes[0].value = AUX_CS | AUX_RPP; @@ -936,16 +957,14 @@ static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result) writes[1].value = a_priv->hw_control_bits & ~NOT_PARALLEL_POLL; retval = agilent_82357a_write_registers(a_priv, writes, 2); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return retval; } udelay(2); //silly, since usb write will take way longer read.address = CPTR; retval = agilent_82357a_read_registers(a_priv, &read, 1, 1); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "read_registers() returned error\n"); return retval; } *result = read.value; @@ -956,8 +975,7 @@ static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result) writes[1].value = AUX_RPP; retval = agilent_82357a_write_registers(a_priv, writes, 2); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return retval; } return 0; @@ -996,17 +1014,19 @@ static void agilent_82357a_return_to_local(gpib_board_t *board) static int agilent_82357a_line_status(const gpib_board_t *board) { struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; struct agilent_82357a_register_pairlet bus_status; int retval; int status = ValidALL;
+ if (!a_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(a_priv->bus_interface); bus_status.address = BSR; retval = agilent_82357a_read_registers(a_priv, &bus_status, 1, 0); if (retval) { if (retval != -EAGAIN) - dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "read_registers() returned error\n"); return retval; } if (bus_status.value & BSR_REN_BIT) @@ -1047,16 +1067,18 @@ static unsigned short nanosec_to_fast_talker_bits(unsigned int *nanosec) static unsigned int agilent_82357a_t1_delay(gpib_board_t *board, unsigned int nanosec) { struct agilent_82357a_priv *a_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); + struct usb_device *usb_dev; struct agilent_82357a_register_pairlet write; int retval;
+ if (!a_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(a_priv->bus_interface); write.address = FAST_TALKER_T1; write.value = nanosec_to_fast_talker_bits(&nanosec); retval = agilent_82357a_write_registers(a_priv, &write, 1); if (retval) - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return nanosec; }
@@ -1081,7 +1103,7 @@ static void agilent_82357a_interrupt_complete(struct urb *urb) default: /* other error, resubmit */ retval = usb_submit_urb(a_priv->interrupt_urb, GFP_ATOMIC); if (retval) - dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__); + dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n"); return; }
@@ -1097,7 +1119,7 @@ static void agilent_82357a_interrupt_complete(struct urb *urb)
retval = usb_submit_urb(a_priv->interrupt_urb, GFP_ATOMIC); if (retval) - dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__); + dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n"); }
static int agilent_82357a_setup_urbs(gpib_board_t *board) @@ -1133,8 +1155,7 @@ static int agilent_82357a_setup_urbs(gpib_board_t *board) if (retval) { usb_free_urb(a_priv->interrupt_urb); a_priv->interrupt_urb = NULL; - dev_err(&usb_dev->dev, "%s: failed to submit first interrupt urb, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "failed to submit first interrupt urb, retval=%i\n", retval); goto setup_exit; } mutex_unlock(&a_priv->interrupt_alloc_lock); @@ -1184,108 +1205,78 @@ static void agilent_82357a_free_private(gpib_board_t *board) { kfree(board->private_data); board->private_data = NULL; - }
+#define INIT_NUM_REG_WRITES 18 static int agilent_82357a_init(gpib_board_t *board) { struct agilent_82357a_priv *a_priv = board->private_data; struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); struct agilent_82357a_register_pairlet hw_control; - struct agilent_82357a_register_pairlet writes[0x20]; + struct agilent_82357a_register_pairlet writes[INIT_NUM_REG_WRITES]; int retval; - int i; unsigned int nanosec;
- i = 0; - writes[i].address = LED_CONTROL; - writes[i].value = FAIL_LED_ON; - ++i; - writes[i].address = RESET_TO_POWERUP; - writes[i].value = RESET_SPACEBALL; - ++i; - retval = agilent_82357a_write_registers(a_priv, writes, i); + writes[0].address = LED_CONTROL; + writes[0].value = FAIL_LED_ON; + writes[1].address = RESET_TO_POWERUP; + writes[1].value = RESET_SPACEBALL; + retval = agilent_82357a_write_registers(a_priv, writes, 2); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return -EIO; } set_current_state(TASK_INTERRUPTIBLE); if (schedule_timeout(usec_to_jiffies(2000))) return -ERESTARTSYS; - i = 0; - writes[i].address = AUXCR; - writes[i].value = AUX_NBAF; - ++i; - writes[i].address = AUXCR; - writes[i].value = AUX_HLDE; - ++i; - writes[i].address = AUXCR; - writes[i].value = AUX_TON; - ++i; - writes[i].address = AUXCR; - writes[i].value = AUX_LON; - ++i; - writes[i].address = AUXCR; - writes[i].value = AUX_RSV2; - ++i; - writes[i].address = AUXCR; - writes[i].value = AUX_INVAL; - ++i; - writes[i].address = AUXCR; - writes[i].value = AUX_RPP; - ++i; - writes[i].address = AUXCR; - writes[i].value = AUX_STDL; - ++i; - writes[i].address = AUXCR; - writes[i].value = AUX_VSTDL; - ++i; - writes[i].address = FAST_TALKER_T1; + writes[0].address = AUXCR; + writes[0].value = AUX_NBAF; + writes[1].address = AUXCR; + writes[1].value = AUX_HLDE; + writes[2].address = AUXCR; + writes[2].value = AUX_TON; + writes[3].address = AUXCR; + writes[3].value = AUX_LON; + writes[4].address = AUXCR; + writes[4].value = AUX_RSV2; + writes[5].address = AUXCR; + writes[5].value = AUX_INVAL; + writes[6].address = AUXCR; + writes[6].value = AUX_RPP; + writes[7].address = AUXCR; + writes[7].value = AUX_STDL; + writes[8].address = AUXCR; + writes[8].value = AUX_VSTDL; + writes[9].address = FAST_TALKER_T1; nanosec = board->t1_nano_sec; - writes[i].value = nanosec_to_fast_talker_bits(&nanosec); + writes[9].value = nanosec_to_fast_talker_bits(&nanosec); board->t1_nano_sec = nanosec; - ++i; - writes[i].address = ADR; - writes[i].value = board->pad & ADDRESS_MASK; - ++i; - writes[i].address = PPR; - writes[i].value = 0; - ++i; - writes[i].address = SPMR; - writes[i].value = 0; - ++i; - writes[i].address = PROTOCOL_CONTROL; - writes[i].value = WRITE_COMPLETE_INTERRUPT_EN; - ++i; - writes[i].address = IMR0; - writes[i].value = HR_BOIE | HR_BIIE; - ++i; - writes[i].address = IMR1; - writes[i].value = HR_SRQIE; - ++i; + writes[10].address = ADR; + writes[10].value = board->pad & ADDRESS_MASK; + writes[11].address = PPR; + writes[11].value = 0; + writes[12].address = SPMR; + writes[12].value = 0; + writes[13].address = PROTOCOL_CONTROL; + writes[13].value = WRITE_COMPLETE_INTERRUPT_EN; + writes[14].address = IMR0; + writes[14].value = HR_BOIE | HR_BIIE; + writes[15].address = IMR1; + writes[15].value = HR_SRQIE; // turn off reset state - writes[i].address = AUXCR; - writes[i].value = AUX_CHIP_RESET; - ++i; - writes[i].address = LED_CONTROL; - writes[i].value = FIRMWARE_LED_CONTROL; - ++i; - if (i > ARRAY_SIZE(writes)) { - dev_err(&usb_dev->dev, "%s: bug! 
writes[] overflow\n", __func__); - return -EFAULT; - } - retval = agilent_82357a_write_registers(a_priv, writes, i); + writes[16].address = AUXCR; + writes[16].value = AUX_CHIP_RESET; + writes[17].address = LED_CONTROL; + writes[17].value = FIRMWARE_LED_CONTROL; + retval = agilent_82357a_write_registers(a_priv, writes, INIT_NUM_REG_WRITES); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return -EIO; } hw_control.address = HW_CONTROL; retval = agilent_82357a_read_registers(a_priv, &hw_control, 1, 1); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "read_registers() returned error\n"); return -EIO; } a_priv->hw_control_bits = (hw_control.value & ~0x7) | NOT_TI_RESET | NOT_PARALLEL_POLL; @@ -1336,7 +1327,7 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t } if (i == MAX_NUM_82357A_INTERFACES) { dev_err(board->gpib_dev, - "No Agilent 82357 gpib adapters found, have you loaded its firmware?\n"); + "No supported adapters found, have you loaded its firmware?\n"); retval = -ENODEV; goto attach_fail; } @@ -1372,8 +1363,7 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t goto attach_fail; }
- dev_info(&usb_dev->dev, - "bus %d dev num %d attached to gpib minor %d, agilent usb interface %i\n", + dev_info(&usb_dev->dev, "bus %d dev num %d attached to gpib%d, interface %i\n", usb_dev->bus->busnum, usb_dev->devnum, board->minor, i); mutex_unlock(&agilent_82357a_hotplug_lock); return retval; @@ -1390,37 +1380,24 @@ static int agilent_82357a_go_idle(gpib_board_t *board) struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface); struct agilent_82357a_register_pairlet writes[0x20]; int retval; - int i;
- i = 0; // turn on tms9914 reset state - writes[i].address = AUXCR; - writes[i].value = AUX_CS | AUX_CHIP_RESET; - ++i; + writes[0].address = AUXCR; + writes[0].value = AUX_CS | AUX_CHIP_RESET; a_priv->hw_control_bits &= ~NOT_TI_RESET; - writes[i].address = HW_CONTROL; - writes[i].value = a_priv->hw_control_bits; - ++i; - writes[i].address = PROTOCOL_CONTROL; - writes[i].value = 0; - ++i; - writes[i].address = IMR0; - writes[i].value = 0; - ++i; - writes[i].address = IMR1; - writes[i].value = 0; - ++i; - writes[i].address = LED_CONTROL; - writes[i].value = 0; - ++i; - if (i > ARRAY_SIZE(writes)) { - dev_err(&usb_dev->dev, "%s: bug! writes[] overflow\n", __func__); - return -EFAULT; - } - retval = agilent_82357a_write_registers(a_priv, writes, i); + writes[1].address = HW_CONTROL; + writes[1].value = a_priv->hw_control_bits; + writes[2].address = PROTOCOL_CONTROL; + writes[2].value = 0; + writes[3].address = IMR0; + writes[3].value = 0; + writes[4].address = IMR1; + writes[4].value = 0; + writes[5].address = LED_CONTROL; + writes[5].value = 0; + retval = agilent_82357a_write_registers(a_priv, writes, 6); if (retval) { - dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n", - __func__); + dev_err(&usb_dev->dev, "write_registers() returned error\n"); return -EIO; } return 0; @@ -1445,7 +1422,6 @@ static void agilent_82357a_detach(gpib_board_t *board) agilent_82357a_release_urbs(a_priv); agilent_82357a_free_private(board); } - dev_info(board->gpib_dev, "%s: detached\n", __func__); mutex_unlock(&agilent_82357a_hotplug_lock); }
@@ -1510,8 +1486,7 @@ static int agilent_82357a_driver_probe(struct usb_interface *interface, if (i == MAX_NUM_82357A_INTERFACES) { usb_put_dev(usb_dev); mutex_unlock(&agilent_82357a_hotplug_lock); - dev_err(&usb_dev->dev, "%s: out of space in agilent_82357a_driver_interfaces[]\n", - __func__); + dev_err(&usb_dev->dev, "out of space in agilent_82357a_driver_interfaces[]\n"); return -1; } path = kmalloc(path_length, GFP_KERNEL); @@ -1552,13 +1527,12 @@ static void agilent_82357a_driver_disconnect(struct usb_interface *interface) mutex_unlock(&a_priv->control_alloc_lock); } } - dev_dbg(&usb_dev->dev, "nulled agilent_82357a_driver_interfaces[%i]\n", i); agilent_82357a_driver_interfaces[i] = NULL; break; } } if (i == MAX_NUM_82357A_INTERFACES) - dev_err(&usb_dev->dev, "unable to find interface in agilent_82357a_driver_interfaces[]? bug?\n"); + dev_err(&usb_dev->dev, "unable to find interface - bug?\n"); usb_put_dev(usb_dev);
mutex_unlock(&agilent_82357a_hotplug_lock); @@ -1583,18 +1557,18 @@ static int agilent_82357a_driver_suspend(struct usb_interface *interface, pm_mes agilent_82357a_abort(a_priv, 0); retval = agilent_82357a_go_idle(board); if (retval) { - dev_err(&usb_dev->dev, "%s: failed to go idle, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "failed to go idle, retval=%i\n", + retval); mutex_unlock(&agilent_82357a_hotplug_lock); return retval; } mutex_lock(&a_priv->interrupt_alloc_lock); agilent_82357a_cleanup_urbs(a_priv); mutex_unlock(&a_priv->interrupt_alloc_lock); - dev_info(&usb_dev->dev, - "bus %d dev num %d gpib minor %d, agilent usb interface %i suspended\n", - usb_dev->bus->busnum, usb_dev->devnum, - board->minor, i); + dev_dbg(&usb_dev->dev, + "bus %d dev num %d gpib %d, interface %i suspended\n", + usb_dev->bus->busnum, usb_dev->devnum, + board->minor, i); } } break; @@ -1631,8 +1605,8 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface) mutex_lock(&a_priv->interrupt_alloc_lock); retval = usb_submit_urb(a_priv->interrupt_urb, GFP_KERNEL); if (retval) { - dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "failed to resubmit interrupt urb in resume, retval=%i\n", + retval); mutex_unlock(&a_priv->interrupt_alloc_lock); mutex_unlock(&agilent_82357a_hotplug_lock); return retval; @@ -1655,9 +1629,9 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface) // assert/unassert REN agilent_82357a_remote_enable(board, a_priv->ren_state);
- dev_info(&usb_dev->dev, - "bus %d dev num %d gpib minor %d, agilent usb interface %i resumed\n", - usb_dev->bus->busnum, usb_dev->devnum, board->minor, i); + dev_dbg(&usb_dev->dev, + "bus %d dev num %d gpib%d, interface %i resumed\n", + usb_dev->bus->busnum, usb_dev->devnum, board->minor, i); }
resume_exit: @@ -1667,7 +1641,7 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface) }
static struct usb_driver agilent_82357a_bus_driver = { - .name = "agilent_82357a_gpib", + .name = DRV_NAME, .probe = agilent_82357a_driver_probe, .disconnect = agilent_82357a_driver_disconnect, .suspend = agilent_82357a_driver_suspend, @@ -1680,19 +1654,18 @@ static int __init agilent_82357a_init_module(void) int i; int ret;
- pr_info("agilent_82357a_gpib driver loading"); for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i) agilent_82357a_driver_interfaces[i] = NULL;
ret = usb_register(&agilent_82357a_bus_driver); if (ret) { - pr_err("agilent_82357a: usb_register failed: error = %d\n", ret); + pr_err("usb_register failed: error = %d\n", ret); return ret; }
ret = gpib_register_driver(&agilent_82357a_gpib_interface, THIS_MODULE); if (ret) { - pr_err("agilent_82357a: gpib_register_driver failed: error = %d\n", ret); + pr_err("gpib_register_driver failed: error = %d\n", ret); usb_deregister(&agilent_82357a_bus_driver); return ret; } @@ -1702,7 +1675,6 @@ static int __init agilent_82357a_init_module(void)
static void __exit agilent_82357a_exit_module(void) { - pr_info("agilent_82357a_gpib driver unloading"); gpib_unregister_driver(&agilent_82357a_gpib_interface); usb_deregister(&agilent_82357a_bus_driver); } diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/staging/gpib/cb7210/cb7210.c index 4d22f647a453..ab93061263bf 100644 --- a/drivers/staging/gpib/cb7210/cb7210.c +++ b/drivers/staging/gpib/cb7210/cb7210.c @@ -1342,8 +1342,8 @@ static struct pcmcia_device_id cb_pcmcia_ids[] = { MODULE_DEVICE_TABLE(pcmcia, cb_pcmcia_ids);
static struct pcmcia_driver cb_gpib_cs_driver = { + .name = "cb_gpib_cs", .owner = THIS_MODULE, - .drv = { .name = "cb_gpib_cs", }, .id_table = cb_pcmcia_ids, .probe = cb_gpib_probe, .remove = cb_gpib_remove, diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/staging/gpib/hp_82341/hp_82341.c index 0ddae295912f..589c4fee1d56 100644 --- a/drivers/staging/gpib/hp_82341/hp_82341.c +++ b/drivers/staging/gpib/hp_82341/hp_82341.c @@ -718,7 +718,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config) for (i = 0; i < hp_82341_num_io_regions; ++i) { start_addr = iobase + i * hp_priv->io_region_offset; if (!request_region(start_addr, hp_82341_region_iosize, "hp_82341")) { - pr_err("hp_82341: failed to allocate io ports 0x%lx-0x%lx\n", + pr_err("hp_82341: failed to allocate io ports 0x%x-0x%x\n", start_addr, start_addr + hp_82341_region_iosize - 1); return -EIO; diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c index d0656dc520f5..1b976a28a7fe 100644 --- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c +++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c @@ -5,6 +5,10 @@ * copyright : (C) 2004 by Frank Mori Hess ***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define dev_fmt pr_fmt +#define DRV_NAME KBUILD_MODNAME + #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -75,7 +79,7 @@ static unsigned short ni_usb_timeout_code(unsigned int usec) */ else if (usec <= 1000000000) return 0x02; - pr_err("%s: bug? usec is greater than 1e9\n", __func__); + pr_err("bug? usec is greater than 1e9\n"); return 0xf0; }
@@ -83,8 +87,6 @@ static void ni_usb_bulk_complete(struct urb *urb) { struct ni_usb_urb_ctx *context = urb->context;
-// printk("debug: %s: status=0x%x, error_count=%i, actual_length=%i\n", __func__, -// urb->status, urb->error_count, urb->actual_length); complete(&context->complete); }
@@ -137,8 +139,8 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d del_timer_sync(&ni_priv->bulk_timer); usb_free_urb(ni_priv->bulk_urb); ni_priv->bulk_urb = NULL; - dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "failed to submit bulk out urb, retval=%i\n", + retval); mutex_unlock(&ni_priv->bulk_transfer_lock); return retval; } @@ -146,7 +148,7 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d wait_for_completion(&context->complete); // wait for ni_usb_bulk_complete if (context->timed_out) { usb_kill_urb(ni_priv->bulk_urb); - dev_err(&usb_dev->dev, "%s: killed urb due to timeout\n", __func__); + dev_err(&usb_dev->dev, "killed urb due to timeout\n"); retval = -ETIMEDOUT; } else { retval = ni_priv->bulk_urb->status; @@ -218,14 +220,12 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv, if (timeout_msecs) mod_timer(&ni_priv->bulk_timer, jiffies + msecs_to_jiffies(timeout_msecs));
- //printk("%s: submitting urb\n", __func__); retval = usb_submit_urb(ni_priv->bulk_urb, GFP_KERNEL); if (retval) { del_timer_sync(&ni_priv->bulk_timer); usb_free_urb(ni_priv->bulk_urb); ni_priv->bulk_urb = NULL; - dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "failed to submit bulk in urb, retval=%i\n", retval); mutex_unlock(&ni_priv->bulk_transfer_lock); return retval; } @@ -250,7 +250,7 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv, } if (context->timed_out) { usb_kill_urb(ni_priv->bulk_urb); - dev_err(&usb_dev->dev, "%s: killed urb due to timeout\n", __func__); + dev_err(&usb_dev->dev, "killed urb due to timeout\n"); retval = -ETIMEDOUT; } else { if (ni_priv->bulk_urb->status) @@ -330,14 +330,14 @@ static void ni_usb_soft_update_status(gpib_board_t *board, unsigned int ni_usb_i ni_priv->monitored_ibsta_bits &= ~ni_usb_ibsta; need_monitoring_bits &= ~ni_priv->monitored_ibsta_bits; /* mm - monitored set */ spin_unlock_irqrestore(&board->spinlock, flags); - dev_dbg(&usb_dev->dev, "%s: need_monitoring_bits=0x%x\n", __func__, need_monitoring_bits); + dev_dbg(&usb_dev->dev, "need_monitoring_bits=0x%x\n", need_monitoring_bits);
if (need_monitoring_bits & ~ni_usb_ibsta) ni_usb_set_interrupt_monitor(board, ni_usb_ibsta_monitor_mask); else if (need_monitoring_bits & ni_usb_ibsta) wake_up_interruptible(&board->wait);
- dev_dbg(&usb_dev->dev, "%s: ni_usb_ibsta=0x%x\n", __func__, ni_usb_ibsta); + dev_dbg(&usb_dev->dev, "ibsta=0x%x\n", ni_usb_ibsta); }
static int ni_usb_parse_status_block(const u8 *buffer, struct ni_usb_status_block *status) @@ -371,7 +371,7 @@ static int ni_usb_parse_register_read_block(const u8 *raw_data, unsigned int *re int k;
if (raw_data[i++] != NIUSB_REGISTER_READ_DATA_START_ID) { - pr_err("%s: parse error: wrong start id\n", __func__); + pr_err("parse error: wrong start id\n"); unexpected = 1; } for (k = 0; k < results_per_chunk && j < num_results; ++k) @@ -380,18 +380,18 @@ static int ni_usb_parse_register_read_block(const u8 *raw_data, unsigned int *re while (i % 4) i++; if (raw_data[i++] != NIUSB_REGISTER_READ_DATA_END_ID) { - pr_err("%s: parse error: wrong end id\n", __func__); + pr_err("parse error: wrong end id\n"); unexpected = 1; } if (raw_data[i++] % results_per_chunk != num_results % results_per_chunk) { - pr_err("%s: parse error: wrong count=%i for NIUSB_REGISTER_READ_DATA_END\n", - __func__, (int)raw_data[i - 1]); + pr_err("parse error: wrong count=%i for NIUSB_REGISTER_READ_DATA_END\n", + (int)raw_data[i - 1]); unexpected = 1; } while (i % 4) { if (raw_data[i++] != 0) { - pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n", - __func__, i - 1, (int)raw_data[i - 1]); + pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n", + i - 1, (int)raw_data[i - 1]); unexpected = 1; } } @@ -408,9 +408,8 @@ static int ni_usb_parse_termination_block(const u8 *buffer) buffer[i++] != 0x0 || buffer[i++] != 0x0 || buffer[i++] != 0x0) { - pr_err("%s: received unexpected termination block\n", __func__); - pr_err(" expected: 0x%x 0x%x 0x%x 0x%x\n", - NIUSB_TERM_ID, 0x0, 0x0, 0x0); + pr_err("received unexpected termination block\n"); + pr_err(" expected: 0x%x 0x%x 0x%x 0x%x\n", NIUSB_TERM_ID, 0x0, 0x0, 0x0); pr_err(" received: 0x%x 0x%x 0x%x 0x%x\n", buffer[i - 4], buffer[i - 3], buffer[i - 2], buffer[i - 1]); } @@ -438,12 +437,12 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl } else if (raw_data[i] == NIUSB_IBRD_EXTENDED_DATA_ID) { data_block_length = ibrd_extended_data_block_length; if (raw_data[++i] != 0) { - pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n", - __func__, i, (int)raw_data[i]); + pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n", + i, (int)raw_data[i]); unexpected = 1; } } else { - pr_err("%s: logic bug!\n", __func__); + pr_err("Unexpected NIUSB_IBRD ID\n"); return -EINVAL; } ++i; @@ -457,7 +456,7 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl } i += ni_usb_parse_status_block(&raw_data[i], status); if (status->id != NIUSB_IBRD_STATUS_ID) { - pr_err("%s: bug: status->id=%i, != ibrd_status_id\n", __func__, status->id); + pr_err("bug: status->id=%i, != ibrd_status_id\n", status->id); return -EIO; } adr1_bits = raw_data[i++]; @@ -468,29 +467,28 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl *actual_bytes_read = 0; } if (*actual_bytes_read > j) - pr_err("%s: bug: discarded data. actual_bytes_read=%i, j=%i\n", - __func__, *actual_bytes_read, j); + pr_err("bug: discarded data. 
actual_bytes_read=%i, j=%i\n", *actual_bytes_read, j); for (k = 0; k < 2; k++) if (raw_data[i++] != 0) { - pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n", - __func__, i - 1, (int)raw_data[i - 1]); + pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n", + i - 1, (int)raw_data[i - 1]); unexpected = 1; } i += ni_usb_parse_status_block(&raw_data[i], ®ister_write_status); if (register_write_status.id != NIUSB_REG_WRITE_ID) { - pr_err("%s: unexpected data: register write status id=0x%x, expected 0x%x\n", - __func__, register_write_status.id, NIUSB_REG_WRITE_ID); + pr_err("unexpected data: register write status id=0x%x, expected 0x%x\n", + register_write_status.id, NIUSB_REG_WRITE_ID); unexpected = 1; } if (raw_data[i++] != 2) { - pr_err("%s: unexpected data: register write count=%i, expected 2\n", - __func__, (int)raw_data[i - 1]); + pr_err("unexpected data: register write count=%i, expected 2\n", + (int)raw_data[i - 1]); unexpected = 1; } for (k = 0; k < 3; k++) if (raw_data[i++] != 0) { - pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n", - __func__, i - 1, (int)raw_data[i - 1]); + pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n", + i - 1, (int)raw_data[i - 1]); unexpected = 1; } i += ni_usb_parse_termination_block(&raw_data[i]); @@ -530,18 +528,14 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
out_data_length = num_writes * bytes_per_write + 0x10; out_data = kmalloc(out_data_length, GFP_KERNEL); - if (!out_data) { - dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__); + if (!out_data) return -ENOMEM; - } i += ni_usb_bulk_register_write_header(&out_data[i], num_writes); for (j = 0; j < num_writes; j++) i += ni_usb_bulk_register_write(&out_data[i], writes[j]); while (i % 4) out_data[i++] = 0x00; i += ni_usb_bulk_termination(&out_data[i]); - if (i > out_data_length) - dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__);
mutex_lock(&ni_priv->addressed_transfer_lock);
@@ -549,22 +543,21 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv, kfree(out_data); if (retval) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); return retval; }
in_data = kmalloc(in_data_length, GFP_KERNEL); if (!in_data) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__); return -ENOMEM; } retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0); if (retval || bytes_read != 16) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); ni_usb_dump_raw_block(in_data, bytes_read); kfree(in_data); return retval; @@ -576,18 +569,16 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv, //FIXME parse extra 09 status bits and termination kfree(in_data); if (status.id != NIUSB_REG_WRITE_ID) { - dev_err(&usb_dev->dev, "%s: parse error, id=0x%x != NIUSB_REG_WRITE_ID\n", - __func__, status.id); + dev_err(&usb_dev->dev, "parse error, id=0x%x != NIUSB_REG_WRITE_ID\n", status.id); return -EIO; } if (status.error_code) { - dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x\n", - __func__, status.error_code); + dev_err(&usb_dev->dev, "nonzero error code 0x%x\n", status.error_code); return -EIO; } if (reg_writes_completed != num_writes) { - dev_err(&usb_dev->dev, "%s: reg_writes_completed=%i, num_writes=%i\n", - __func__, reg_writes_completed, num_writes); + dev_err(&usb_dev->dev, "reg_writes_completed=%i, num_writes=%i\n", + reg_writes_completed, num_writes); return -EIO; } if (ibsta) @@ -601,7 +592,7 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length, { int retval, parse_retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; static const int out_data_length = 0x20; int in_data_length; @@ -614,10 +605,11 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length, struct ni_usb_register reg;
*bytes_read = 0; - if (length > max_read_length) { - length = max_read_length; - dev_err(&usb_dev->dev, "%s: read length too long\n", __func__); - } + if (!ni_priv->bus_interface) + return -ENODEV; + if (length > max_read_length) + return -EINVAL; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) return -ENOMEM; @@ -649,8 +641,8 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length, if (retval || usb_bytes_written != i) { if (retval == 0) retval = -EIO; - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n", - __func__, retval, usb_bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n", + retval, usb_bytes_written, i); mutex_unlock(&ni_priv->addressed_transfer_lock); return retval; } @@ -668,8 +660,8 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
if (retval == -ERESTARTSYS) { } else if (retval) { - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, usb_bytes_read=%i\n", - __func__, retval, usb_bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, usb_bytes_read=%i\n", + retval, usb_bytes_read); kfree(in_data); return retval; } @@ -677,14 +669,14 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length, if (parse_retval != usb_bytes_read) { if (parse_retval >= 0) parse_retval = -EIO; - dev_err(&usb_dev->dev, "%s: retval=%i usb_bytes_read=%i\n", - __func__, parse_retval, usb_bytes_read); + dev_err(&usb_dev->dev, "retval=%i usb_bytes_read=%i\n", + parse_retval, usb_bytes_read); kfree(in_data); return parse_retval; } if (actual_length != length - status.count) { - dev_err(&usb_dev->dev, "%s: actual_length=%i expected=%li\n", - __func__, actual_length, (long)(length - status.count)); + dev_err(&usb_dev->dev, "actual_length=%i expected=%li\n", + actual_length, (long)(length - status.count)); ni_usb_dump_raw_block(in_data, usb_bytes_read); } kfree(in_data); @@ -699,7 +691,7 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length, break; case NIUSB_ATN_STATE_ERROR: retval = -EIO; - dev_err(&usb_dev->dev, "%s: read when ATN set\n", __func__); + dev_err(&usb_dev->dev, "read when ATN set\n"); break; case NIUSB_ADDRESSING_ERROR: retval = -EIO; @@ -708,12 +700,11 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length, retval = -ETIMEDOUT; break; case NIUSB_EOSMODE_ERROR: - dev_err(&usb_dev->dev, "%s: driver bug, we should have been able to avoid NIUSB_EOSMODE_ERROR.\n", - __func__); + dev_err(&usb_dev->dev, "driver bug, we should have been able to avoid NIUSB_EOSMODE_ERROR.\n"); retval = -EINVAL; break; default: - dev_err(&usb_dev->dev, "%s: unknown error code=%i\n", __func__, status.error_code); + dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code); retval = -EIO; break; } @@ -731,7 +722,7 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length, { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; int out_data_length; static const int in_data_length = 0x10; @@ -741,12 +732,11 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length, struct ni_usb_status_block status; static const int max_write_length = 0xffff;
- *bytes_written = 0; - if (length > max_write_length) { - length = max_write_length; - send_eoi = 0; - dev_err(&usb_dev->dev, "%s: write length too long\n", __func__); - } + if (!ni_priv->bus_interface) + return -ENODEV; + if (length > max_write_length) + return -EINVAL; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); out_data_length = length + 0x10; out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) @@ -777,8 +767,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length, kfree(out_data); if (retval || usb_bytes_written != i) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n", - __func__, retval, usb_bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n", + retval, usb_bytes_written, i); return retval; }
@@ -793,8 +783,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length, mutex_unlock(&ni_priv->addressed_transfer_lock);
if ((retval && retval != -ERESTARTSYS) || usb_bytes_read != 12) { - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, usb_bytes_read=%i\n", - __func__, retval, usb_bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, usb_bytes_read=%i\n", + retval, usb_bytes_read); kfree(in_data); return retval; } @@ -810,8 +800,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length, */ break; case NIUSB_ADDRESSING_ERROR: - dev_err(&usb_dev->dev, "%s: Addressing error retval %d error code=%i\n", - __func__, retval, status.error_code); + dev_err(&usb_dev->dev, "Addressing error retval %d error code=%i\n", + retval, status.error_code); retval = -ENXIO; break; case NIUSB_NO_LISTENER_ERROR: @@ -821,8 +811,7 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length, retval = -ETIMEDOUT; break; default: - dev_err(&usb_dev->dev, "%s: unknown error code=%i\n", - __func__, status.error_code); + dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code); retval = -EPIPE; break; } @@ -836,7 +825,7 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; int out_data_length; static const int in_data_length = 0x10; @@ -848,8 +837,11 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len static const int max_command_length = 0x10;
*command_bytes_written = 0; + if (!ni_priv->bus_interface) + return -ENODEV; if (length > max_command_length) length = max_command_length; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); out_data_length = length + 0x10; out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) @@ -873,8 +865,8 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len kfree(out_data); if (retval || bytes_written != i) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); return retval; }
@@ -890,8 +882,8 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len mutex_unlock(&ni_priv->addressed_transfer_lock);
if ((retval && retval != -ERESTARTSYS) || bytes_read != 12) { - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); kfree(in_data); return retval; } @@ -909,12 +901,12 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len case NIUSB_NO_BUS_ERROR: return -ENOTCONN; case NIUSB_EOSMODE_ERROR: - dev_err(&usb_dev->dev, "%s: got eosmode error. Driver bug?\n", __func__); + dev_err(&usb_dev->dev, "got eosmode error. Driver bug?\n"); return -EIO; case NIUSB_TIMEOUT_ERROR: return -ETIMEDOUT; default: - dev_err(&usb_dev->dev, "%s: unknown error code=%i\n", __func__, status.error_code); + dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code); return -EIO; } ni_usb_soft_update_status(board, status.ibsta, 0); @@ -942,7 +934,7 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; static const int out_data_length = 0x10; static const int in_data_length = 0x10; @@ -950,6 +942,9 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous) int i = 0; struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) return -ENOMEM; @@ -968,15 +963,14 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous) kfree(out_data); if (retval || bytes_written != i) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); return retval; }
in_data = kmalloc(in_data_length, GFP_KERNEL); if (!in_data) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__); return -ENOMEM; } retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 1); @@ -986,8 +980,8 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous) if ((retval && retval != -ERESTARTSYS) || bytes_read != 12) { if (retval == 0) retval = -EIO; - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); kfree(in_data); return retval; } @@ -1001,7 +995,7 @@ static int ni_usb_go_to_standby(gpib_board_t *board) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; static const int out_data_length = 0x10; static const int in_data_length = 0x20; @@ -1009,6 +1003,9 @@ static int ni_usb_go_to_standby(gpib_board_t *board) int i = 0; struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) return -ENOMEM; @@ -1025,15 +1022,14 @@ static int ni_usb_go_to_standby(gpib_board_t *board) kfree(out_data); if (retval || bytes_written != i) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); return retval; }
in_data = kmalloc(in_data_length, GFP_KERNEL); if (!in_data) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__); return -ENOMEM; } retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0); @@ -1041,16 +1037,15 @@ static int ni_usb_go_to_standby(gpib_board_t *board) mutex_unlock(&ni_priv->addressed_transfer_lock);
if (retval || bytes_read != 12) { - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); kfree(in_data); return retval; } ni_usb_parse_status_block(in_data, &status); kfree(in_data); if (status.id != NIUSB_IBGTS_ID) - dev_err(&usb_dev->dev, "%s: bug: status.id 0x%x != INUSB_IBGTS_ID\n", - __func__, status.id); + dev_err(&usb_dev->dev, "bug: status.id 0x%x != INUSB_IBGTS_ID\n", status.id); ni_usb_soft_update_status(board, status.ibsta, 0); return 0; } @@ -1059,11 +1054,14 @@ static void ni_usb_request_system_control(gpib_board_t *board, int request_contr { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; int i = 0; struct ni_usb_register writes[4]; unsigned int ibsta;
+ if (!ni_priv->bus_interface) + return; // -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); if (request_control) { writes[i].device = NIUSB_SUBDEV_TNT4882; writes[i].address = CMDR; @@ -1093,7 +1091,7 @@ static void ni_usb_request_system_control(gpib_board_t *board, int request_contr } retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return; // retval; } if (!request_control) @@ -1107,7 +1105,7 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; static const int out_data_length = 0x10; static const int in_data_length = 0x10; @@ -1115,14 +1113,15 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert) int i = 0; struct ni_usb_status_block status;
- // FIXME: we are going to pulse when assert is true, and ignore otherwise + if (!ni_priv->bus_interface) + return; // -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); +// FIXME: we are going to pulse when assert is true, and ignore otherwise if (assert == 0) return; out_data = kmalloc(out_data_length, GFP_KERNEL); - if (!out_data) { - dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__); + if (!out_data) return; - } out_data[i++] = NIUSB_IBSIC_ID; out_data[i++] = 0x0; out_data[i++] = 0x0; @@ -1131,8 +1130,8 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert) retval = ni_usb_send_bulk_msg(ni_priv, out_data, i, &bytes_written, 1000); kfree(out_data); if (retval || bytes_written != i) { - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); return; } in_data = kmalloc(in_data_length, GFP_KERNEL); @@ -1141,8 +1140,8 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0); if (retval || bytes_read != 12) { - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); kfree(in_data); return; } @@ -1155,10 +1154,13 @@ static void ni_usb_remote_enable(gpib_board_t *board, int enable) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; struct ni_usb_register reg; unsigned int ibsta;
+ if (!ni_priv->bus_interface) + return; // -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); reg.device = NIUSB_SUBDEV_TNT4882; reg.address = nec7210_to_tnt4882_offset(AUXMR); if (enable) @@ -1167,7 +1169,7 @@ static void ni_usb_remote_enable(gpib_board_t *board, int enable) reg.value = AUX_CREN; retval = ni_usb_write_registers(ni_priv, ®, 1, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return; //retval; } ni_priv->ren_state = enable; @@ -1202,12 +1204,14 @@ static unsigned int ni_usb_update_status(gpib_board_t *board, unsigned int clear { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; static const int buffer_length = 8; u8 *buffer; struct ni_usb_status_block status;
- //printk("%s: receive control pipe is %i\n", __func__, pipe); + if (!ni_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); buffer = kmalloc(buffer_length, GFP_KERNEL); if (!buffer) return board->status; @@ -1216,7 +1220,7 @@ static unsigned int ni_usb_update_status(gpib_board_t *board, unsigned int clear USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x200, 0x0, buffer, buffer_length, 1000); if (retval != buffer_length) { - dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval); + dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval); kfree(buffer); return board->status; } @@ -1235,7 +1239,6 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv) u8 *buffer; struct ni_usb_status_block status;
- //printk("%s: receive control pipe is %i\n", __func__, pipe); buffer = kmalloc(buffer_length, GFP_KERNEL); if (!buffer) return; @@ -1244,7 +1247,7 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv) USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0, 0x0, buffer, buffer_length, 1000); if (retval != buffer_length) { - dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval); + dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval); kfree(buffer); return; } @@ -1256,11 +1259,14 @@ static int ni_usb_primary_address(gpib_board_t *board, unsigned int address) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; int i = 0; struct ni_usb_register writes[2]; unsigned int ibsta;
+ if (!ni_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); writes[i].device = NIUSB_SUBDEV_TNT4882; writes[i].address = nec7210_to_tnt4882_offset(ADR); writes[i].value = address; @@ -1271,7 +1277,7 @@ static int ni_usb_primary_address(gpib_board_t *board, unsigned int address) i++; retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return retval; } ni_usb_soft_update_status(board, ibsta, 0); @@ -1311,15 +1317,18 @@ static int ni_usb_secondary_address(gpib_board_t *board, unsigned int address, i { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; int i = 0; struct ni_usb_register writes[3]; unsigned int ibsta;
+ if (!ni_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); i += ni_usb_write_sad(writes, address, enable); retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return retval; } ni_usb_soft_update_status(board, ibsta, 0); @@ -1330,7 +1339,7 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; static const int out_data_length = 0x10; static const int in_data_length = 0x20; @@ -1339,6 +1348,9 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result) int j = 0; struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) return -ENOMEM; @@ -1353,8 +1365,8 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
kfree(out_data); if (retval || bytes_written != i) { - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); return retval; } in_data = kmalloc(in_data_length, GFP_KERNEL); @@ -1366,8 +1378,8 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result) &bytes_read, 1000, 1);
if (retval && retval != -ERESTARTSYS) { - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); kfree(in_data); return retval; } @@ -1382,18 +1394,21 @@ static void ni_usb_parallel_poll_configure(gpib_board_t *board, uint8_t config) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; int i = 0; struct ni_usb_register writes[1]; unsigned int ibsta;
+ if (!ni_priv->bus_interface) + return; // -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); writes[i].device = NIUSB_SUBDEV_TNT4882; writes[i].address = nec7210_to_tnt4882_offset(AUXMR); writes[i].value = PPR | config; i++; retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return;// retval; } ni_usb_soft_update_status(board, ibsta, 0); @@ -1404,11 +1419,14 @@ static void ni_usb_parallel_poll_response(gpib_board_t *board, int ist) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; int i = 0; struct ni_usb_register writes[1]; unsigned int ibsta;
+ if (!ni_priv->bus_interface) + return; // -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); writes[i].device = NIUSB_SUBDEV_TNT4882; writes[i].address = nec7210_to_tnt4882_offset(AUXMR); if (ist) @@ -1418,7 +1436,7 @@ static void ni_usb_parallel_poll_response(gpib_board_t *board, int ist) i++; retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return;// retval; } ni_usb_soft_update_status(board, ibsta, 0); @@ -1429,18 +1447,21 @@ static void ni_usb_serial_poll_response(gpib_board_t *board, u8 status) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; int i = 0; struct ni_usb_register writes[1]; unsigned int ibsta;
+ if (!ni_priv->bus_interface) + return; // -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); writes[i].device = NIUSB_SUBDEV_TNT4882; writes[i].address = nec7210_to_tnt4882_offset(SPMR); writes[i].value = status; i++; retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return;// retval; } ni_usb_soft_update_status(board, ibsta, 0); @@ -1456,18 +1477,21 @@ static void ni_usb_return_to_local(gpib_board_t *board) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; int i = 0; struct ni_usb_register writes[1]; unsigned int ibsta;
+ if (!ni_priv->bus_interface) + return; // -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); writes[i].device = NIUSB_SUBDEV_TNT4882; writes[i].address = nec7210_to_tnt4882_offset(AUXMR); writes[i].value = AUX_RTL; i++; retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return;// retval; } ni_usb_soft_update_status(board, ibsta, 0); @@ -1478,7 +1502,7 @@ static int ni_usb_line_status(const gpib_board_t *board) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; u8 *out_data, *in_data; static const int out_data_length = 0x20; static const int in_data_length = 0x20; @@ -1488,6 +1512,9 @@ static int ni_usb_line_status(const gpib_board_t *board) int line_status = ValidALL; // NI windows driver reads 0xd(HSSEL), 0xc (ARD0), 0x1f (BSR)
+ if (!ni_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); out_data = kmalloc(out_data_length, GFP_KERNEL); if (!out_data) return -ENOMEM; @@ -1509,15 +1536,14 @@ static int ni_usb_line_status(const gpib_board_t *board) if (retval || bytes_written != i) { mutex_unlock(&ni_priv->addressed_transfer_lock); if (retval != -EAGAIN) - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n", - __func__, retval, bytes_written, i); + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n", + retval, bytes_written, i); return retval; }
in_data = kmalloc(in_data_length, GFP_KERNEL); if (!in_data) { mutex_unlock(&ni_priv->addressed_transfer_lock); - dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__); return -ENOMEM; } retval = ni_usb_nonblocking_receive_bulk_msg(ni_priv, in_data, in_data_length, @@ -1527,8 +1553,8 @@ static int ni_usb_line_status(const gpib_board_t *board)
if (retval) { if (retval != -EAGAIN) - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); kfree(in_data); return retval; } @@ -1595,16 +1621,19 @@ static unsigned int ni_usb_t1_delay(gpib_board_t *board, unsigned int nano_sec) { int retval; struct ni_usb_priv *ni_priv = board->private_data; - struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); + struct usb_device *usb_dev; struct ni_usb_register writes[3]; unsigned int ibsta; unsigned int actual_ns; int i;
+ if (!ni_priv->bus_interface) + return -ENODEV; + usb_dev = interface_to_usbdev(ni_priv->bus_interface); i = ni_usb_setup_t1_delay(writes, nano_sec, &actual_ns); retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return -1; //FIXME should change return type to int for error reporting } board->t1_nano_sec = actual_ns; @@ -1736,7 +1765,7 @@ static int ni_usb_setup_init(gpib_board_t *board, struct ni_usb_register *writes writes[i].value = AUX_CPPF; i++; if (i > NUM_INIT_WRITES) { - dev_err(&usb_dev->dev, "%s: bug!, buffer overrun, i=%i\n", __func__, i); + dev_err(&usb_dev->dev, "bug!, buffer overrun, i=%i\n", i); return 0; } return i; @@ -1762,7 +1791,7 @@ static int ni_usb_init(gpib_board_t *board) return -EFAULT; kfree(writes); if (retval) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return retval; } ni_usb_soft_update_status(board, ibsta, 0); @@ -1778,9 +1807,6 @@ static void ni_usb_interrupt_complete(struct urb *urb) struct ni_usb_status_block status; unsigned long flags;
-// printk("debug: %s: status=0x%x, error_count=%i, actual_length=%i\n", __func__, -// urb->status, urb->error_count, urb->actual_length); - switch (urb->status) { /* success */ case 0: @@ -1793,23 +1819,21 @@ static void ni_usb_interrupt_complete(struct urb *urb) default: /* other error, resubmit */ retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_ATOMIC); if (retval) - dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__); + dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n"); return; }
ni_usb_parse_status_block(urb->transfer_buffer, &status); -// printk("debug: ibsta=0x%x\n", status.ibsta);
spin_lock_irqsave(&board->spinlock, flags); ni_priv->monitored_ibsta_bits &= ~status.ibsta; -// printk("debug: monitored_ibsta_bits=0x%x\n", ni_priv->monitored_ibsta_bits); spin_unlock_irqrestore(&board->spinlock, flags);
wake_up_interruptible(&board->wait);
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_ATOMIC); if (retval) - dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__); + dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n"); }
static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monitored_bits) @@ -1821,22 +1845,20 @@ static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monito u8 *buffer; struct ni_usb_status_block status; unsigned long flags; - //printk("%s: receive control pipe is %i\n", __func__, pipe); + buffer = kmalloc(buffer_length, GFP_KERNEL); if (!buffer) return -ENOMEM;
spin_lock_irqsave(&board->spinlock, flags); ni_priv->monitored_ibsta_bits = ni_usb_ibsta_monitor_mask & monitored_bits; -// dev_err(&usb_dev->dev, "debug: %s: monitored_ibsta_bits=0x%x\n", -// __func__, ni_priv->monitored_ibsta_bits); spin_unlock_irqrestore(&board->spinlock, flags); retval = ni_usb_receive_control_msg(ni_priv, NI_USB_WAIT_REQUEST, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x300, ni_usb_ibsta_monitor_mask & monitored_bits, buffer, buffer_length, 1000); if (retval != buffer_length) { - dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval); + dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval); kfree(buffer); return -1; } @@ -1872,8 +1894,7 @@ static int ni_usb_setup_urbs(gpib_board_t *board) retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_KERNEL); mutex_unlock(&ni_priv->interrupt_transfer_lock); if (retval) { - dev_err(&usb_dev->dev, "%s: failed to submit first interrupt urb, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "failed to submit first interrupt urb, retval=%i\n", retval); return retval; } return 0; @@ -1904,7 +1925,6 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv) int j; unsigned int serial_number;
-// printk("%s: %s\n", __func__); in_data = kmalloc(in_data_length, GFP_KERNEL); if (!in_data) return -ENOMEM; @@ -1924,20 +1944,19 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv) i += ni_usb_bulk_termination(&out_data[i]); retval = ni_usb_send_bulk_msg(ni_priv, out_data, out_data_length, &bytes_written, 1000); if (retval) { - dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%li\n", - __func__, + dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%li\n", retval, bytes_written, (long)out_data_length); goto serial_out; } retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0); if (retval) { - dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n", - __func__, retval, bytes_read); + dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n", + retval, bytes_read); ni_usb_dump_raw_block(in_data, bytes_read); goto serial_out; } if (ARRAY_SIZE(results) < num_reads) { - dev_err(&usb_dev->dev, "Setup bug\n"); + dev_err(&usb_dev->dev, "serial number eetup bug\n"); retval = -EINVAL; goto serial_out; } @@ -1945,7 +1964,7 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv) serial_number = 0; for (j = 0; j < num_reads; ++j) serial_number |= (results[j] & 0xff) << (8 * j); - dev_info(&usb_dev->dev, "%s: board serial number is 0x%x\n", __func__, serial_number); + dev_dbg(&usb_dev->dev, "board serial number is 0x%x\n", serial_number); retval = 0; serial_out: kfree(in_data); @@ -1973,22 +1992,22 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv) USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0, 0x0, buffer, buffer_size, 1000); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n", - __func__, NI_USB_SERIAL_NUMBER_REQUEST, retval); + dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n", + NI_USB_SERIAL_NUMBER_REQUEST, retval); goto ready_out; } j = 0; if (buffer[j] != NI_USB_SERIAL_NUMBER_REQUEST) { - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n", - __func__, j, (int)buffer[j], NI_USB_SERIAL_NUMBER_REQUEST); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n", + j, (int)buffer[j], NI_USB_SERIAL_NUMBER_REQUEST); unexpected = 1; } if (unexpected) ni_usb_dump_raw_block(buffer, retval); // NI-USB-HS+ pads the serial with 0x0 to make 16 bytes if (retval != 5 && retval != 16) { - dev_err(&usb_dev->dev, "%s: received unexpected number of bytes = %i, expected 5 or 16\n", - __func__, retval); + dev_err(&usb_dev->dev, "received unexpected number of bytes = %i, expected 5 or 16\n", + retval); ni_usb_dump_raw_block(buffer, retval); } serial_number = 0; @@ -1996,7 +2015,7 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv) serial_number |= (buffer[++j] << 8); serial_number |= (buffer[++j] << 16); serial_number |= (buffer[++j] << 24); - dev_info(&usb_dev->dev, "%s: board serial number is 0x%x\n", __func__, serial_number); + dev_dbg(&usb_dev->dev, "board serial number is 0x%x\n", serial_number); for (i = 0; i < timeout; ++i) { int ready = 0;
@@ -2004,26 +2023,26 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv) USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0, 0x0, buffer, buffer_size, 100); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n", - __func__, NI_USB_POLL_READY_REQUEST, retval); + dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n", + NI_USB_POLL_READY_REQUEST, retval); goto ready_out; } j = 0; unexpected = 0; if (buffer[j] != NI_USB_POLL_READY_REQUEST) { // [0] - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n", - __func__, j, (int)buffer[j], NI_USB_POLL_READY_REQUEST); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n", + j, (int)buffer[j], NI_USB_POLL_READY_REQUEST); unexpected = 1; } ++j; if (buffer[j] != 0x1 && buffer[j] != 0x0) { // [1] HS+ sends 0x0 - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n", + j, (int)buffer[j]); unexpected = 1; } if (buffer[++j] != 0x0) { // [2] - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n", - __func__, j, (int)buffer[j], 0x0); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n", + j, (int)buffer[j], 0x0); unexpected = 1; } ++j; @@ -2031,22 +2050,22 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv) // NI-USB-HS+ sends 0x0 if (buffer[j] != 0x1 && buffer[j] != 0x8 && buffer[j] != 0x7 && buffer[j] != 0x0) { // [3] - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0, 0x1, 0x7 or 0x8\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0, 0x1, 0x7 or 0x8\n", + j, (int)buffer[j]); unexpected = 1; } ++j; // NI-USB-HS+ sends 0 here if (buffer[j] != 0x30 && buffer[j] != 0x0) { // [4] - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x30\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x30\n", + j, (int)buffer[j]); unexpected = 1; } ++j; // MC usb-488 (and sometimes NI-USB-HS?) 
and NI-USB-HS+ sends 0x0 here if (buffer[j] != 0x1 && buffer[j] != 0x0) { // [5] - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n", + j, (int)buffer[j]); unexpected = 1; } if (buffer[++j] != 0x0) { // [6] @@ -2054,8 +2073,8 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv) // NI-USB-HS+ sends 0xf here if (buffer[j] != 0x2 && buffer[j] != 0xe && buffer[j] != 0xf && buffer[j] != 0x16) { - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x2, 0xe, 0xf or 0x16\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x2, 0xe, 0xf or 0x16\n", + j, (int)buffer[j]); unexpected = 1; } } @@ -2064,30 +2083,30 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv) // MC usb-488 sends 0x5 here; MC usb-488A sends 0x6 here if (buffer[j] != 0x3 && buffer[j] != 0x5 && buffer[j] != 0x6 && buffer[j] != 0x8) { - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x3 or 0x5, 0x6 or 0x08\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x3 or 0x5, 0x6 or 0x08\n", + j, (int)buffer[j]); unexpected = 1; } } ++j; if (buffer[j] != 0x0 && buffer[j] != 0x2) { // [8] MC usb-488 sends 0x2 here - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x2\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, " unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x2\n", + j, (int)buffer[j]); unexpected = 1; } ++j; // MC usb-488A and NI-USB-HS sends 0x3 here; NI-USB-HS+ sends 0x30 here if (buffer[j] != 0x0 && buffer[j] != 0x3 && buffer[j] != 0x30) { // [9] - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0, 0x3 or 0x30\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0, 0x3 or 0x30\n", + j, (int)buffer[j]); unexpected = 1; } if (buffer[++j] != 0x0) { ready = 1; if (buffer[j] != 0x96 && buffer[j] != 0x7 && buffer[j] != 0x6e) { // [10] MC usb-488 sends 0x7 here - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x96, 0x07 or 0x6e\n", - __func__, j, (int)buffer[j]); + dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x96, 0x07 or 0x6e\n", + j, (int)buffer[j]); unexpected = 1; } } @@ -2097,7 +2116,6 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv) break; retval = msleep_interruptible(msec_sleep_duration); if (retval) { - dev_err(&usb_dev->dev, "ni_usb_gpib: msleep interrupted\n"); retval = -ERESTARTSYS; goto ready_out; } @@ -2106,7 +2124,7 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
ready_out: kfree(buffer); - dev_dbg(&usb_dev->dev, "%s: exit retval=%d\n", __func__, retval); + dev_dbg(&usb_dev->dev, "exit retval=%d\n", retval); return retval; }
@@ -2134,14 +2152,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv) USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0, 0x0, buffer, transfer_size, 1000); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n", - __func__, NI_USB_HS_PLUS_0x48_REQUEST, retval); + dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n", + NI_USB_HS_PLUS_0x48_REQUEST, retval); break; } // expected response data: 48 f3 30 00 00 00 00 00 00 00 00 00 00 00 00 00 if (buffer[0] != NI_USB_HS_PLUS_0x48_REQUEST) - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n", - __func__, (int)buffer[0], NI_USB_HS_PLUS_0x48_REQUEST); + dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n", + (int)buffer[0], NI_USB_HS_PLUS_0x48_REQUEST);
transfer_size = 2;
@@ -2149,14 +2167,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv) USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x1, 0x0, buffer, transfer_size, 1000); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n", - __func__, NI_USB_HS_PLUS_LED_REQUEST, retval); + dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n", + NI_USB_HS_PLUS_LED_REQUEST, retval); break; } // expected response data: 4b 00 if (buffer[0] != NI_USB_HS_PLUS_LED_REQUEST) - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n", - __func__, (int)buffer[0], NI_USB_HS_PLUS_LED_REQUEST); + dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n", + (int)buffer[0], NI_USB_HS_PLUS_LED_REQUEST);
transfer_size = 9;
@@ -2165,15 +2183,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv) USB_RECIP_INTERFACE, 0x0, 0x1, buffer, transfer_size, 1000); if (retval < 0) { - dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n", - __func__, NI_USB_HS_PLUS_0xf8_REQUEST, retval); + dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n", + NI_USB_HS_PLUS_0xf8_REQUEST, retval); break; } // expected response data: f8 01 00 00 00 01 00 00 00 if (buffer[0] != NI_USB_HS_PLUS_0xf8_REQUEST) - dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n", - __func__, (int)buffer[0], NI_USB_HS_PLUS_0xf8_REQUEST); - + dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n", + (int)buffer[0], NI_USB_HS_PLUS_0xf8_REQUEST); } while (0);
// cleanup @@ -2192,7 +2209,7 @@ static inline int ni_usb_device_match(struct usb_interface *interface, static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config) { int retval; - int i; + int i, index; struct ni_usb_priv *ni_priv; int product_id; struct usb_device *usb_dev; @@ -2211,19 +2228,17 @@ static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config) ni_priv->bus_interface = ni_usb_driver_interfaces[i]; usb_set_intfdata(ni_usb_driver_interfaces[i], board); usb_dev = interface_to_usbdev(ni_priv->bus_interface); - dev_info(&usb_dev->dev, - "bus %d dev num %d attached to gpib minor %d, NI usb interface %i\n", - usb_dev->bus->busnum, usb_dev->devnum, board->minor, i); + index = i; break; } } if (i == MAX_NUM_NI_USB_INTERFACES) { mutex_unlock(&ni_usb_hotplug_lock); - pr_err("No supported NI usb gpib adapters found, have you loaded its firmware?\n"); + dev_err(board->gpib_dev, "No supported adapters found, have you loaded its firmware?\n"); return -ENODEV; } if (usb_reset_configuration(interface_to_usbdev(ni_priv->bus_interface))) - dev_err(&usb_dev->dev, "ni_usb_gpib: usb_reset_configuration() failed.\n"); + dev_err(&usb_dev->dev, "usb_reset_configuration() failed.\n");
product_id = le16_to_cpu(usb_dev->descriptor.idProduct); ni_priv->product_id = product_id; @@ -2296,7 +2311,9 @@ static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config) }
mutex_unlock(&ni_usb_hotplug_lock); - dev_info(&usb_dev->dev, "%s: attached\n", __func__); + dev_info(&usb_dev->dev, + "bus %d dev num %d attached to gpib%d, intf %i\n", + usb_dev->bus->busnum, usb_dev->devnum, board->minor, index); return retval; }
@@ -2304,27 +2321,19 @@ static int ni_usb_shutdown_hardware(struct ni_usb_priv *ni_priv) { struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface); int retval; - int i = 0; struct ni_usb_register writes[2]; static const int writes_length = ARRAY_SIZE(writes); unsigned int ibsta;
-// printk("%s: %s\n", __func__); - writes[i].device = NIUSB_SUBDEV_TNT4882; - writes[i].address = nec7210_to_tnt4882_offset(AUXMR); - writes[i].value = AUX_CR; - i++; - writes[i].device = NIUSB_SUBDEV_UNKNOWN3; - writes[i].address = 0x10; - writes[i].value = 0x0; - i++; - if (i > writes_length) { - dev_err(&usb_dev->dev, "%s: bug!, buffer overrun, i=%i\n", __func__, i); - return -EINVAL; - } - retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta); + writes[0].device = NIUSB_SUBDEV_TNT4882; + writes[0].address = nec7210_to_tnt4882_offset(AUXMR); + writes[0].value = AUX_CR; + writes[1].device = NIUSB_SUBDEV_UNKNOWN3; + writes[1].address = 0x10; + writes[1].value = 0x0; + retval = ni_usb_write_registers(ni_priv, writes, writes_length, &ibsta); if (retval) { - dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval); + dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval); return retval; } return 0; @@ -2413,7 +2422,7 @@ static int ni_usb_driver_probe(struct usb_interface *interface, const struct usb if (i == MAX_NUM_NI_USB_INTERFACES) { usb_put_dev(usb_dev); mutex_unlock(&ni_usb_hotplug_lock); - dev_err(&usb_dev->dev, "%s: ni_usb_driver_interfaces[] full\n", __func__); + dev_err(&usb_dev->dev, "ni_usb_driver_interfaces[] full\n"); return -1; } path = kmalloc(path_length, GFP_KERNEL); @@ -2423,7 +2432,7 @@ static int ni_usb_driver_probe(struct usb_interface *interface, const struct usb return -ENOMEM; } usb_make_path(usb_dev, path, path_length); - dev_info(&usb_dev->dev, "ni_usb_gpib: probe succeeded for path: %s\n", path); + dev_info(&usb_dev->dev, "probe succeeded for path: %s\n", path); kfree(path); mutex_unlock(&ni_usb_hotplug_lock); return 0; @@ -2458,8 +2467,7 @@ static void ni_usb_driver_disconnect(struct usb_interface *interface) } } if (i == MAX_NUM_NI_USB_INTERFACES) - dev_err(&usb_dev->dev, "%s: unable to find interface in ni_usb_driver_interfaces[]? bug?\n", - __func__); + dev_err(&usb_dev->dev, "unable to find interface bug?\n"); usb_put_dev(usb_dev); mutex_unlock(&ni_usb_hotplug_lock); } @@ -2498,9 +2506,9 @@ static int ni_usb_driver_suspend(struct usb_interface *interface, pm_message_t m ni_usb_cleanup_urbs(ni_priv); mutex_unlock(&ni_priv->interrupt_transfer_lock); } - dev_info(&usb_dev->dev, - "bus %d dev num %d gpib minor %d, ni usb interface %i suspended\n", - usb_dev->bus->busnum, usb_dev->devnum, board->minor, i); + dev_dbg(&usb_dev->dev, + "bus %d dev num %d gpib%d, interface %i suspended\n", + usb_dev->bus->busnum, usb_dev->devnum, board->minor, i); }
mutex_unlock(&ni_usb_hotplug_lock); @@ -2535,15 +2543,15 @@ static int ni_usb_driver_resume(struct usb_interface *interface) mutex_lock(&ni_priv->interrupt_transfer_lock); retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_KERNEL); if (retval) { - dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb, retval=%i\n", - __func__, retval); + dev_err(&usb_dev->dev, "resume failed to resubmit interrupt urb, retval=%i\n", + retval); mutex_unlock(&ni_priv->interrupt_transfer_lock); mutex_unlock(&ni_usb_hotplug_lock); return retval; } mutex_unlock(&ni_priv->interrupt_transfer_lock); } else { - dev_err(&usb_dev->dev, "%s: bug! int urb not set up\n", __func__); + dev_err(&usb_dev->dev, "bug! resume int urb not set up\n"); mutex_unlock(&ni_usb_hotplug_lock); return -EINVAL; } @@ -2600,9 +2608,9 @@ static int ni_usb_driver_resume(struct usb_interface *interface) if (ni_priv->ren_state) ni_usb_remote_enable(board, 1);
- dev_info(&usb_dev->dev, - "bus %d dev num %d gpib minor %d, ni usb interface %i resumed\n", - usb_dev->bus->busnum, usb_dev->devnum, board->minor, i); + dev_dbg(&usb_dev->dev, + "bus %d dev num %d gpib%d, interface %i resumed\n", + usb_dev->bus->busnum, usb_dev->devnum, board->minor, i); }
mutex_unlock(&ni_usb_hotplug_lock); @@ -2610,7 +2618,7 @@ static int ni_usb_driver_resume(struct usb_interface *interface) }
static struct usb_driver ni_usb_bus_driver = { - .name = "ni_usb_gpib", + .name = DRV_NAME, .probe = ni_usb_driver_probe, .disconnect = ni_usb_driver_disconnect, .suspend = ni_usb_driver_suspend, @@ -2623,19 +2631,18 @@ static int __init ni_usb_init_module(void) int i; int ret;
- pr_info("ni_usb_gpib driver loading\n"); for (i = 0; i < MAX_NUM_NI_USB_INTERFACES; i++) ni_usb_driver_interfaces[i] = NULL;
ret = usb_register(&ni_usb_bus_driver); if (ret) { - pr_err("ni_usb_gpib: usb_register failed: error = %d\n", ret); + pr_err("usb_register failed: error = %d\n", ret); return ret; }
ret = gpib_register_driver(&ni_usb_gpib_interface, THIS_MODULE); if (ret) { - pr_err("ni_usb_gpib: gpib_register_driver failed: error = %d\n", ret); + pr_err("gpib_register_driver failed: error = %d\n", ret); return ret; }
@@ -2644,7 +2651,6 @@ static int __init ni_usb_init_module(void)
static void __exit ni_usb_exit_module(void) { - pr_info("ni_usb_gpib driver unloading\n"); gpib_unregister_driver(&ni_usb_gpib_interface); usb_deregister(&ni_usb_bus_driver); } diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig index 8d48c61961a6..353e6ee2c145 100644 --- a/drivers/staging/rtl8723bs/Kconfig +++ b/drivers/staging/rtl8723bs/Kconfig @@ -4,6 +4,7 @@ config RTL8723BS depends on WLAN && MMC && CFG80211 depends on m select CRYPTO + select CRYPTO_LIB_AES select CRYPTO_LIB_ARC4 help This option enables support for RTL8723BS SDIO drivers, such as diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index a4e83e5d619b..0c7ea2d0ee85 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -308,6 +308,20 @@ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state * return (struct vchiq_arm_state *)state->platform_state; }
+static void +vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt) +{ + struct vchiq_arm_state *arm_state; + + kthread_stop(mgmt->state.sync_thread); + kthread_stop(mgmt->state.recycle_thread); + kthread_stop(mgmt->state.slot_handler_thread); + + arm_state = vchiq_platform_get_arm_state(&mgmt->state); + if (!IS_ERR_OR_NULL(arm_state->ka_thread)) + kthread_stop(arm_state->ka_thread); +} + void vchiq_dump_platform_state(struct seq_file *f) { seq_puts(f, " Platform: 2835 (VC master)\n"); @@ -1386,8 +1400,6 @@ static int vchiq_probe(struct platform_device *pdev) return ret; }
- vchiq_debugfs_init(&mgmt->state); - dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n", VCHIQ_VERSION, VCHIQ_VERSION_MIN);
@@ -1398,9 +1410,12 @@ static int vchiq_probe(struct platform_device *pdev) ret = vchiq_register_chrdev(&pdev->dev); if (ret) { dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n"); + vchiq_platform_uninit(mgmt); return ret; }
+ vchiq_debugfs_init(&mgmt->state); + bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio"); bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");
@@ -1410,19 +1425,12 @@ static int vchiq_probe(struct platform_device *pdev) static void vchiq_remove(struct platform_device *pdev) { struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev); - struct vchiq_arm_state *arm_state;
vchiq_device_unregister(bcm2835_audio); vchiq_device_unregister(bcm2835_camera); vchiq_debugfs_deinit(); vchiq_deregister_chrdev(); - - kthread_stop(mgmt->state.sync_thread); - kthread_stop(mgmt->state.recycle_thread); - kthread_stop(mgmt->state.slot_handler_thread); - - arm_state = vchiq_platform_get_arm_state(&mgmt->state); - kthread_stop(arm_state->ka_thread); + vchiq_platform_uninit(mgmt); }
 static struct platform_driver vchiq_driver = {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 761c511aea07..c7b7da629741 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -176,7 +176,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 
 	memset(tl_cmd, 0, sizeof(*tl_cmd));
 	tl_cmd->sc = sc;
-	tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;
+	tl_cmd->sc_cmd_tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
 
 	tcm_loop_target_queue_cmd(tl_cmd);
 	return 0;
@@ -242,7 +242,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
-				 scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
+				 blk_mq_unique_tag(scsi_cmd_to_rq(sc)),
+				 TMR_ABORT_TASK);
 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 }
diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
index 543b03960e99..57b90005888a 100644
--- a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
@@ -45,6 +45,9 @@ static int int3402_thermal_probe(struct platform_device *pdev)
 	struct int3402_thermal_data *d;
 	int ret;
 
+	if (!adev)
+		return -ENODEV;
+
 	if (!acpi_has_method(adev->handle, "_TMP"))
 		return -ENODEV;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 5e9ca4376d68..94fa981081fd 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -486,7 +486,8 @@ static int do_output_char(u8 c, struct tty_struct *tty, int space) static int process_output(u8 c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; - int space, retval; + unsigned int space; + int retval;
mutex_lock(&ldata->output_lock);
@@ -522,16 +523,16 @@ static ssize_t process_output_block(struct tty_struct *tty, const u8 *buf, unsigned int nr) { struct n_tty_data *ldata = tty->disc_data; - int space; - int i; + unsigned int space; + int i; const u8 *cp;
mutex_lock(&ldata->output_lock);
space = tty_write_room(tty); - if (space <= 0) { + if (space == 0) { mutex_unlock(&ldata->output_lock); - return space; + return 0; } if (nr > space) nr = space; @@ -696,7 +697,7 @@ static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail, static size_t __process_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; - int space, old_space; + unsigned int space, old_space; size_t tail; u8 c;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 79623b2482a0..9fdb66f2fcb8 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -441,7 +441,7 @@ static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport)
static void lpuart_stop_tx(struct uart_port *port) { - unsigned char temp; + u8 temp;
temp = readb(port->membase + UARTCR2); temp &= ~(UARTCR2_TIE | UARTCR2_TCIE); @@ -450,7 +450,7 @@ static void lpuart_stop_tx(struct uart_port *port)
static void lpuart32_stop_tx(struct uart_port *port) { - unsigned long temp; + u32 temp;
temp = lpuart32_read(port, UARTCTRL); temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE); @@ -459,7 +459,7 @@ static void lpuart32_stop_tx(struct uart_port *port)
static void lpuart_stop_rx(struct uart_port *port) { - unsigned char temp; + u8 temp;
temp = readb(port->membase + UARTCR2); writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2); @@ -467,7 +467,7 @@ static void lpuart_stop_rx(struct uart_port *port)
static void lpuart32_stop_rx(struct uart_port *port) { - unsigned long temp; + u32 temp;
temp = lpuart32_read(port, UARTCTRL); lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL); @@ -581,7 +581,7 @@ static int lpuart_dma_tx_request(struct uart_port *port) ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
if (ret) { - dev_err(sport->port.dev, + dev_err(port->dev, "DMA slave config failed, err = %d\n", ret); return ret; } @@ -611,13 +611,13 @@ static void lpuart_flush_buffer(struct uart_port *port) }
if (lpuart_is_32(sport)) { - val = lpuart32_read(&sport->port, UARTFIFO); + val = lpuart32_read(port, UARTFIFO); val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; - lpuart32_write(&sport->port, val, UARTFIFO); + lpuart32_write(port, val, UARTFIFO); } else { - val = readb(sport->port.membase + UARTCFIFO); + val = readb(port->membase + UARTCFIFO); val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH; - writeb(val, sport->port.membase + UARTCFIFO); + writeb(val, port->membase + UARTCFIFO); } }
@@ -639,38 +639,36 @@ static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset,
static int lpuart_poll_init(struct uart_port *port) { - struct lpuart_port *sport = container_of(port, - struct lpuart_port, port); unsigned long flags; - unsigned char temp; + u8 temp;
- sport->port.fifosize = 0; + port->fifosize = 0;
- uart_port_lock_irqsave(&sport->port, &flags); + uart_port_lock_irqsave(port, &flags); /* Disable Rx & Tx */ - writeb(0, sport->port.membase + UARTCR2); + writeb(0, port->membase + UARTCR2);
- temp = readb(sport->port.membase + UARTPFIFO); + temp = readb(port->membase + UARTPFIFO); /* Enable Rx and Tx FIFO */ writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE, - sport->port.membase + UARTPFIFO); + port->membase + UARTPFIFO);
/* flush Tx and Rx FIFO */ writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, - sport->port.membase + UARTCFIFO); + port->membase + UARTCFIFO);
/* explicitly clear RDRF */ - if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) { - readb(sport->port.membase + UARTDR); - writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO); + if (readb(port->membase + UARTSR1) & UARTSR1_RDRF) { + readb(port->membase + UARTDR); + writeb(UARTSFIFO_RXUF, port->membase + UARTSFIFO); }
- writeb(0, sport->port.membase + UARTTWFIFO); - writeb(1, sport->port.membase + UARTRWFIFO); + writeb(0, port->membase + UARTTWFIFO); + writeb(1, port->membase + UARTRWFIFO);
/* Enable Rx and Tx */ - writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2); - uart_port_unlock_irqrestore(&sport->port, flags); + writeb(UARTCR2_RE | UARTCR2_TE, port->membase + UARTCR2); + uart_port_unlock_irqrestore(port, flags);
return 0; } @@ -693,33 +691,32 @@ static int lpuart_poll_get_char(struct uart_port *port) static int lpuart32_poll_init(struct uart_port *port) { unsigned long flags; - struct lpuart_port *sport = container_of(port, struct lpuart_port, port); u32 temp;
- sport->port.fifosize = 0; + port->fifosize = 0;
- uart_port_lock_irqsave(&sport->port, &flags); + uart_port_lock_irqsave(port, &flags);
/* Disable Rx & Tx */ - lpuart32_write(&sport->port, 0, UARTCTRL); + lpuart32_write(port, 0, UARTCTRL);
- temp = lpuart32_read(&sport->port, UARTFIFO); + temp = lpuart32_read(port, UARTFIFO);
/* Enable Rx and Tx FIFO */ - lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO); + lpuart32_write(port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
/* flush Tx and Rx FIFO */ - lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO); + lpuart32_write(port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
/* explicitly clear RDRF */ - if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) { - lpuart32_read(&sport->port, UARTDATA); - lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO); + if (lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF) { + lpuart32_read(port, UARTDATA); + lpuart32_write(port, UARTFIFO_RXUF, UARTFIFO); }
/* Enable Rx and Tx */ - lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL); - uart_port_unlock_irqrestore(&sport->port, flags); + lpuart32_write(port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL); + uart_port_unlock_irqrestore(port, flags);
return 0; } @@ -752,7 +749,7 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport) static inline void lpuart32_transmit_buffer(struct lpuart_port *sport) { struct tty_port *tport = &sport->port.state->port; - unsigned long txcnt; + u32 txcnt; unsigned char c;
if (sport->port.x_char) { @@ -789,7 +786,7 @@ static void lpuart_start_tx(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned char temp; + u8 temp;
temp = readb(port->membase + UARTCR2); writeb(temp | UARTCR2_TIE, port->membase + UARTCR2); @@ -806,7 +803,7 @@ static void lpuart_start_tx(struct uart_port *port) static void lpuart32_start_tx(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long temp; + u32 temp;
if (sport->lpuart_dma_tx_use) { if (!lpuart_stopped_or_empty(port)) @@ -839,8 +836,8 @@ static unsigned int lpuart_tx_empty(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned char sr1 = readb(port->membase + UARTSR1); - unsigned char sfifo = readb(port->membase + UARTSFIFO); + u8 sr1 = readb(port->membase + UARTSR1); + u8 sfifo = readb(port->membase + UARTSFIFO);
if (sport->dma_tx_in_progress) return 0; @@ -855,9 +852,9 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long stat = lpuart32_read(port, UARTSTAT); - unsigned long sfifo = lpuart32_read(port, UARTFIFO); - unsigned long ctrl = lpuart32_read(port, UARTCTRL); + u32 stat = lpuart32_read(port, UARTSTAT); + u32 sfifo = lpuart32_read(port, UARTFIFO); + u32 ctrl = lpuart32_read(port, UARTCTRL);
if (sport->dma_tx_in_progress) return 0; @@ -884,7 +881,7 @@ static void lpuart_rxint(struct lpuart_port *sport) { unsigned int flg, ignored = 0, overrun = 0; struct tty_port *port = &sport->port.state->port; - unsigned char rx, sr; + u8 rx, sr;
uart_port_lock(&sport->port);
@@ -961,7 +958,7 @@ static void lpuart32_rxint(struct lpuart_port *sport) { unsigned int flg, ignored = 0; struct tty_port *port = &sport->port.state->port; - unsigned long rx, sr; + u32 rx, sr; bool is_break;
uart_port_lock(&sport->port); @@ -1039,7 +1036,7 @@ static void lpuart32_rxint(struct lpuart_port *sport) static irqreturn_t lpuart_int(int irq, void *dev_id) { struct lpuart_port *sport = dev_id; - unsigned char sts; + u8 sts;
sts = readb(sport->port.membase + UARTSR1);
@@ -1113,7 +1110,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) int count, copied;
if (lpuart_is_32(sport)) { - unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); + u32 sr = lpuart32_read(&sport->port, UARTSTAT);
if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { /* Clear the error flags */ @@ -1125,10 +1122,10 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) sport->port.icount.frame++; } } else { - unsigned char sr = readb(sport->port.membase + UARTSR1); + u8 sr = readb(sport->port.membase + UARTSR1);
if (sr & (UARTSR1_PE | UARTSR1_FE)) { - unsigned char cr2; + u8 cr2;
/* Disable receiver during this operation... */ cr2 = readb(sport->port.membase + UARTCR2); @@ -1279,7 +1276,7 @@ static void lpuart32_dma_idleint(struct lpuart_port *sport) static irqreturn_t lpuart32_int(int irq, void *dev_id) { struct lpuart_port *sport = dev_id; - unsigned long sts, rxcount; + u32 sts, rxcount;
sts = lpuart32_read(&sport->port, UARTSTAT); rxcount = lpuart32_read(&sport->port, UARTWATER); @@ -1411,12 +1408,12 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) dma_async_issue_pending(chan);
if (lpuart_is_32(sport)) { - unsigned long temp = lpuart32_read(&sport->port, UARTBAUD); + u32 temp = lpuart32_read(&sport->port, UARTBAUD);
lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
if (sport->dma_idle_int) { - unsigned long ctrl = lpuart32_read(&sport->port, UARTCTRL); + u32 ctrl = lpuart32_read(&sport->port, UARTCTRL);
lpuart32_write(&sport->port, ctrl | UARTCTRL_ILIE, UARTCTRL); } @@ -1449,12 +1446,9 @@ static void lpuart_dma_rx_free(struct uart_port *port) static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios, struct serial_rs485 *rs485) { - struct lpuart_port *sport = container_of(port, - struct lpuart_port, port); - - u8 modem = readb(sport->port.membase + UARTMODEM) & + u8 modem = readb(port->membase + UARTMODEM) & ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE); - writeb(modem, sport->port.membase + UARTMODEM); + writeb(modem, port->membase + UARTMODEM);
if (rs485->flags & SER_RS485_ENABLED) { /* Enable auto RS-485 RTS mode */ @@ -1472,32 +1466,29 @@ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios, modem &= ~UARTMODEM_TXRTSPOL; }
- writeb(modem, sport->port.membase + UARTMODEM); + writeb(modem, port->membase + UARTMODEM); return 0; }
static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios, struct serial_rs485 *rs485) { - struct lpuart_port *sport = container_of(port, - struct lpuart_port, port); - - unsigned long modem = lpuart32_read(&sport->port, UARTMODIR) + u32 modem = lpuart32_read(port, UARTMODIR) & ~(UARTMODIR_TXRTSPOL | UARTMODIR_TXRTSE); u32 ctrl;
/* TXRTSE and TXRTSPOL only can be changed when transmitter is disabled. */ - ctrl = lpuart32_read(&sport->port, UARTCTRL); + ctrl = lpuart32_read(port, UARTCTRL); if (ctrl & UARTCTRL_TE) { /* wait for the transmit engine to complete */ - lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); - lpuart32_write(&sport->port, ctrl & ~UARTCTRL_TE, UARTCTRL); + lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC); + lpuart32_write(port, ctrl & ~UARTCTRL_TE, UARTCTRL);
- while (lpuart32_read(&sport->port, UARTCTRL) & UARTCTRL_TE) + while (lpuart32_read(port, UARTCTRL) & UARTCTRL_TE) cpu_relax(); }
- lpuart32_write(&sport->port, modem, UARTMODIR); + lpuart32_write(port, modem, UARTMODIR);
if (rs485->flags & SER_RS485_ENABLED) { /* Enable auto RS-485 RTS mode */ @@ -1515,10 +1506,10 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio modem &= ~UARTMODIR_TXRTSPOL; }
- lpuart32_write(&sport->port, modem, UARTMODIR); + lpuart32_write(port, modem, UARTMODIR);
if (ctrl & UARTCTRL_TE) - lpuart32_write(&sport->port, ctrl, UARTCTRL); + lpuart32_write(port, ctrl, UARTCTRL);
return 0; } @@ -1577,7 +1568,7 @@ static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void lpuart_break_ctl(struct uart_port *port, int break_state) { - unsigned char temp; + u8 temp;
temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
@@ -1589,7 +1580,7 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
static void lpuart32_break_ctl(struct uart_port *port, int break_state) { - unsigned long temp; + u32 temp;
temp = lpuart32_read(port, UARTCTRL);
@@ -1623,8 +1614,7 @@ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
static void lpuart_setup_watermark(struct lpuart_port *sport) { - unsigned char val, cr2; - unsigned char cr2_saved; + u8 val, cr2, cr2_saved;
cr2 = readb(sport->port.membase + UARTCR2); cr2_saved = cr2; @@ -1657,7 +1647,7 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
static void lpuart_setup_watermark_enable(struct lpuart_port *sport) { - unsigned char cr2; + u8 cr2;
lpuart_setup_watermark(sport);
@@ -1668,8 +1658,7 @@ static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
static void lpuart32_setup_watermark(struct lpuart_port *sport) { - unsigned long val, ctrl; - unsigned long ctrl_saved; + u32 val, ctrl, ctrl_saved;
ctrl = lpuart32_read(&sport->port, UARTCTRL); ctrl_saved = ctrl; @@ -1778,7 +1767,7 @@ static void lpuart_tx_dma_startup(struct lpuart_port *sport) static void lpuart_rx_dma_startup(struct lpuart_port *sport) { int ret; - unsigned char cr3; + u8 cr3;
if (uart_console(&sport->port)) goto err; @@ -1828,14 +1817,14 @@ static void lpuart_hw_setup(struct lpuart_port *sport) static int lpuart_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned char temp; + u8 temp;
/* determine FIFO size and enable FIFO mode */ - temp = readb(sport->port.membase + UARTPFIFO); + temp = readb(port->membase + UARTPFIFO);
sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) & UARTPFIFO_FIFOSIZE_MASK); - sport->port.fifosize = sport->txfifo_size; + port->fifosize = sport->txfifo_size;
sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) & UARTPFIFO_FIFOSIZE_MASK); @@ -1848,7 +1837,7 @@ static int lpuart_startup(struct uart_port *port)
static void lpuart32_hw_disable(struct lpuart_port *sport) { - unsigned long temp; + u32 temp;
temp = lpuart32_read(&sport->port, UARTCTRL); temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE | @@ -1858,7 +1847,7 @@ static void lpuart32_hw_disable(struct lpuart_port *sport)
static void lpuart32_configure(struct lpuart_port *sport) { - unsigned long temp; + u32 temp;
temp = lpuart32_read(&sport->port, UARTCTRL); if (!sport->lpuart_dma_rx_use) @@ -1888,14 +1877,14 @@ static void lpuart32_hw_setup(struct lpuart_port *sport) static int lpuart32_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long temp; + u32 temp;
/* determine FIFO size */ - temp = lpuart32_read(&sport->port, UARTFIFO); + temp = lpuart32_read(port, UARTFIFO);
sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) & UARTFIFO_FIFOSIZE_MASK); - sport->port.fifosize = sport->txfifo_size; + port->fifosize = sport->txfifo_size;
sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) & UARTFIFO_FIFOSIZE_MASK); @@ -1908,7 +1897,7 @@ static int lpuart32_startup(struct uart_port *port) if (is_layerscape_lpuart(sport)) { sport->rxfifo_size = 16; sport->txfifo_size = 16; - sport->port.fifosize = sport->txfifo_size; + port->fifosize = sport->txfifo_size; }
lpuart_request_dma(sport); @@ -1942,7 +1931,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport) static void lpuart_shutdown(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned char temp; + u8 temp; unsigned long flags;
uart_port_lock_irqsave(port, &flags); @@ -1962,14 +1951,14 @@ static void lpuart32_shutdown(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long temp; + u32 temp; unsigned long flags;
uart_port_lock_irqsave(port, &flags);
/* clear status */ - temp = lpuart32_read(&sport->port, UARTSTAT); - lpuart32_write(&sport->port, temp, UARTSTAT); + temp = lpuart32_read(port, UARTSTAT); + lpuart32_write(port, temp, UARTSTAT);
/* disable Rx/Tx DMA */ temp = lpuart32_read(port, UARTBAUD); @@ -1998,17 +1987,17 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); unsigned long flags; - unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem; + u8 cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem; unsigned int baud; unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; unsigned int sbr, brfa;
- cr1 = old_cr1 = readb(sport->port.membase + UARTCR1); - old_cr2 = readb(sport->port.membase + UARTCR2); - cr3 = readb(sport->port.membase + UARTCR3); - cr4 = readb(sport->port.membase + UARTCR4); - bdh = readb(sport->port.membase + UARTBDH); - modem = readb(sport->port.membase + UARTMODEM); + cr1 = old_cr1 = readb(port->membase + UARTCR1); + old_cr2 = readb(port->membase + UARTCR2); + cr3 = readb(port->membase + UARTCR3); + cr4 = readb(port->membase + UARTCR4); + bdh = readb(port->membase + UARTBDH); + modem = readb(port->membase + UARTMODEM); /* * only support CS8 and CS7, and for CS7 must enable PE. * supported mode: @@ -2040,7 +2029,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, * When auto RS-485 RTS mode is enabled, * hardware flow control need to be disabled. */ - if (sport->port.rs485.flags & SER_RS485_ENABLED) + if (port->rs485.flags & SER_RS485_ENABLED) termios->c_cflag &= ~CRTSCTS;
if (termios->c_cflag & CRTSCTS) @@ -2081,59 +2070,59 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, * Need to update the Ring buffer length according to the selected * baud rate and restart Rx DMA path. * - * Since timer function acqures sport->port.lock, need to stop before + * Since timer function acquires port->lock, need to stop before * acquiring same lock because otherwise del_timer_sync() can deadlock. */ if (old && sport->lpuart_dma_rx_use) - lpuart_dma_rx_free(&sport->port); + lpuart_dma_rx_free(port);
- uart_port_lock_irqsave(&sport->port, &flags); + uart_port_lock_irqsave(port, &flags);
- sport->port.read_status_mask = 0; + port->read_status_mask = 0; if (termios->c_iflag & INPCK) - sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE; + port->read_status_mask |= UARTSR1_FE | UARTSR1_PE; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) - sport->port.read_status_mask |= UARTSR1_FE; + port->read_status_mask |= UARTSR1_FE;
/* characters to ignore */ - sport->port.ignore_status_mask = 0; + port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) - sport->port.ignore_status_mask |= UARTSR1_PE; + port->ignore_status_mask |= UARTSR1_PE; if (termios->c_iflag & IGNBRK) { - sport->port.ignore_status_mask |= UARTSR1_FE; + port->ignore_status_mask |= UARTSR1_FE; /* * if we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) - sport->port.ignore_status_mask |= UARTSR1_OR; + port->ignore_status_mask |= UARTSR1_OR; }
/* update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud);
/* wait for transmit engine to complete */ - lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC); + lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TC);
/* disable transmit and receive */ writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE), - sport->port.membase + UARTCR2); + port->membase + UARTCR2);
- sbr = sport->port.uartclk / (16 * baud); - brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud; + sbr = port->uartclk / (16 * baud); + brfa = ((port->uartclk - (16 * sbr * baud)) * 2) / baud; bdh &= ~UARTBDH_SBR_MASK; bdh |= (sbr >> 8) & 0x1F; cr4 &= ~UARTCR4_BRFA_MASK; brfa &= UARTCR4_BRFA_MASK; - writeb(cr4 | brfa, sport->port.membase + UARTCR4); - writeb(bdh, sport->port.membase + UARTBDH); - writeb(sbr & 0xFF, sport->port.membase + UARTBDL); - writeb(cr3, sport->port.membase + UARTCR3); - writeb(cr1, sport->port.membase + UARTCR1); - writeb(modem, sport->port.membase + UARTMODEM); + writeb(cr4 | brfa, port->membase + UARTCR4); + writeb(bdh, port->membase + UARTBDH); + writeb(sbr & 0xFF, port->membase + UARTBDL); + writeb(cr3, port->membase + UARTCR3); + writeb(cr1, port->membase + UARTCR1); + writeb(modem, port->membase + UARTMODEM);
/* restore control register */ - writeb(old_cr2, sport->port.membase + UARTCR2); + writeb(old_cr2, port->membase + UARTCR2);
if (old && sport->lpuart_dma_rx_use) { if (!lpuart_start_rx_dma(sport)) @@ -2142,7 +2131,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, sport->lpuart_dma_rx_use = false; }
- uart_port_unlock_irqrestore(&sport->port, flags); + uart_port_unlock_irqrestore(port, flags); }
static void __lpuart32_serial_setbrg(struct uart_port *port, @@ -2236,13 +2225,13 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); unsigned long flags; - unsigned long ctrl, old_ctrl, bd, modem; + u32 ctrl, old_ctrl, bd, modem; unsigned int baud; unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
- ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL); - bd = lpuart32_read(&sport->port, UARTBAUD); - modem = lpuart32_read(&sport->port, UARTMODIR); + ctrl = old_ctrl = lpuart32_read(port, UARTCTRL); + bd = lpuart32_read(port, UARTBAUD); + modem = lpuart32_read(port, UARTMODIR); sport->is_cs7 = false; /* * only support CS8 and CS7 @@ -2276,7 +2265,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, * When auto RS-485 RTS mode is enabled, * hardware flow control need to be disabled. */ - if (sport->port.rs485.flags & SER_RS485_ENABLED) + if (port->rs485.flags & SER_RS485_ENABLED) termios->c_cflag &= ~CRTSCTS;
if (termios->c_cflag & CRTSCTS) @@ -2326,59 +2315,61 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, * Need to update the Ring buffer length according to the selected * baud rate and restart Rx DMA path. * - * Since timer function acqures sport->port.lock, need to stop before + * Since timer function acquires port->lock, need to stop before * acquiring same lock because otherwise del_timer_sync() can deadlock. */ if (old && sport->lpuart_dma_rx_use) - lpuart_dma_rx_free(&sport->port); + lpuart_dma_rx_free(port);
- uart_port_lock_irqsave(&sport->port, &flags); + uart_port_lock_irqsave(port, &flags);
- sport->port.read_status_mask = 0; + port->read_status_mask = 0; if (termios->c_iflag & INPCK) - sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE; + port->read_status_mask |= UARTSTAT_FE | UARTSTAT_PE; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) - sport->port.read_status_mask |= UARTSTAT_FE; + port->read_status_mask |= UARTSTAT_FE;
/* characters to ignore */ - sport->port.ignore_status_mask = 0; + port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) - sport->port.ignore_status_mask |= UARTSTAT_PE; + port->ignore_status_mask |= UARTSTAT_PE; if (termios->c_iflag & IGNBRK) { - sport->port.ignore_status_mask |= UARTSTAT_FE; + port->ignore_status_mask |= UARTSTAT_FE; /* * if we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) - sport->port.ignore_status_mask |= UARTSTAT_OR; + port->ignore_status_mask |= UARTSTAT_OR; }
/* update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud);
+ /* + * disable CTS to ensure the transmit engine is not blocked by the flow + * control when there is dirty data in TX FIFO + */ + lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); + /* * LPUART Transmission Complete Flag may never be set while queuing a break * character, so skip waiting for transmission complete when UARTCTRL_SBK is * asserted. */ - if (!(old_ctrl & UARTCTRL_SBK)) { - lpuart32_write(&sport->port, 0, UARTMODIR); - lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); - } + if (!(old_ctrl & UARTCTRL_SBK)) + lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */ - lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE), + lpuart32_write(port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE), UARTCTRL);
- lpuart32_write(&sport->port, bd, UARTBAUD); + lpuart32_write(port, bd, UARTBAUD); lpuart32_serial_setbrg(sport, baud); - /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */ - lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); /* restore control register */ - lpuart32_write(&sport->port, ctrl, UARTCTRL); + lpuart32_write(port, ctrl, UARTCTRL); /* re-enable the CTS if needed */ - lpuart32_write(&sport->port, modem, UARTMODIR); + lpuart32_write(port, modem, UARTMODIR);
if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE) sport->is_cs7 = true; @@ -2390,7 +2381,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, sport->lpuart_dma_rx_use = false; }
- uart_port_unlock_irqrestore(&sport->port, flags); + uart_port_unlock_irqrestore(port, flags); }
static const char *lpuart_type(struct uart_port *port) @@ -2503,7 +2494,7 @@ static void lpuart_console_write(struct console *co, const char *s, unsigned int count) { struct lpuart_port *sport = lpuart_ports[co->index]; - unsigned char old_cr2, cr2; + u8 old_cr2, cr2; unsigned long flags; int locked = 1;
@@ -2533,7 +2524,7 @@ static void lpuart32_console_write(struct console *co, const char *s, unsigned int count) { struct lpuart_port *sport = lpuart_ports[co->index]; - unsigned long old_cr, cr; + u32 old_cr, cr; unsigned long flags; int locked = 1;
@@ -2567,7 +2558,7 @@ static void __init lpuart_console_get_options(struct lpuart_port *sport, int *baud, int *parity, int *bits) { - unsigned char cr, bdh, bdl, brfa; + u8 cr, bdh, bdl, brfa; unsigned int sbr, uartclk, baud_raw;
cr = readb(sport->port.membase + UARTCR2); @@ -2616,7 +2607,7 @@ static void __init lpuart32_console_get_options(struct lpuart_port *sport, int *baud, int *parity, int *bits) { - unsigned long cr, bd; + u32 cr, bd; unsigned int sbr, uartclk, baud_raw;
cr = lpuart32_read(&sport->port, UARTCTRL); @@ -2822,13 +2813,13 @@ static int lpuart_global_reset(struct lpuart_port *sport) { struct uart_port *port = &sport->port; void __iomem *global_addr; - unsigned long ctrl, bd; + u32 ctrl, bd; unsigned int val = 0; int ret;
ret = clk_prepare_enable(sport->ipg_clk); if (ret) { - dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret); + dev_err(port->dev, "failed to enable uart ipg clk: %d\n", ret); return ret; }
@@ -2839,10 +2830,10 @@ static int lpuart_global_reset(struct lpuart_port *sport) */ ctrl = lpuart32_read(port, UARTCTRL); if (ctrl & UARTCTRL_TE) { - bd = lpuart32_read(&sport->port, UARTBAUD); + bd = lpuart32_read(port, UARTBAUD); if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false, port)) { - dev_warn(sport->port.dev, + dev_warn(port->dev, "timeout waiting for transmit engine to complete\n"); clk_disable_unprepare(sport->ipg_clk); return 0; @@ -3028,7 +3019,7 @@ static int lpuart_runtime_resume(struct device *dev)
static void serial_lpuart_enable_wakeup(struct lpuart_port *sport, bool on) { - unsigned int val, baud; + u32 val, baud;
if (lpuart_is_32(sport)) { val = lpuart32_read(&sport->port, UARTCTRL); @@ -3093,7 +3084,7 @@ static int lpuart_suspend_noirq(struct device *dev) static int lpuart_resume_noirq(struct device *dev) { struct lpuart_port *sport = dev_get_drvdata(dev); - unsigned int val; + u32 val;
pinctrl_pm_select_default_state(dev);
@@ -3113,7 +3104,8 @@ static int lpuart_resume_noirq(struct device *dev) static int lpuart_suspend(struct device *dev) { struct lpuart_port *sport = dev_get_drvdata(dev); - unsigned long temp, flags; + u32 temp; + unsigned long flags;
uart_suspend_port(&lpuart_reg, &sport->port);
@@ -3193,7 +3185,7 @@ static void lpuart_console_fixup(struct lpuart_port *sport) * in VLLS mode, or restore console setting here. */ if (is_imx7ulp_lpuart(sport) && lpuart_uport_is_active(sport) && - console_suspend_enabled && uart_console(&sport->port)) { + console_suspend_enabled && uart_console(uport)) {
mutex_lock(&port->mutex); memset(&termios, 0, sizeof(struct ktermios)); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index fdf0c1008225..9aa7e2a876ec 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2391,10 +2391,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) page_size = readl(&xhci->op_regs->page_size); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Supported page size register = 0x%x", page_size); - i = ffs(page_size); - if (i < 16) + val = ffs(page_size) - 1; + if (val < 16) xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size of %iK", (1 << (i+12)) / 1024); + "Supported page size of %iK", (1 << (val + 12)) / 1024); else xhci_warn(xhci, "WARN: no supported page size\n"); /* Use 4K pages, since that's common and the minimum the HC supports */ diff --git a/drivers/usb/typec/altmodes/thunderbolt.c b/drivers/usb/typec/altmodes/thunderbolt.c index 1b475b1d98e7..6eadf7835f8f 100644 --- a/drivers/usb/typec/altmodes/thunderbolt.c +++ b/drivers/usb/typec/altmodes/thunderbolt.c @@ -112,7 +112,7 @@ static void tbt_altmode_work(struct work_struct *work) return;
disable_plugs: - for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) { + for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) { if (tbt->plug[i]) typec_altmode_put_plug(tbt->plug[i]);
@@ -143,7 +143,7 @@ static int tbt_enter_modes_ordered(struct typec_altmode *alt) if (tbt->plug[TYPEC_PLUG_SOP_P]) { ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL); if (ret < 0) { - for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) { + for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) { if (tbt->plug[i]) typec_altmode_put_plug(tbt->plug[i]);
@@ -324,7 +324,7 @@ static void tbt_altmode_remove(struct typec_altmode *alt) { struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
- for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) { + for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) { if (tbt->plug[i]) typec_altmode_put_plug(tbt->plug[i]); } @@ -351,10 +351,10 @@ static bool tbt_ready(struct typec_altmode *alt) */ for (int i = 0; i < TYPEC_PLUG_SOP_PP + 1; i++) { plug = typec_altmode_get_plug(tbt->alt, i); - if (IS_ERR(plug)) + if (!plug) continue;
- if (!plug || plug->svid != USB_TYPEC_TBT_SID) + if (plug->svid != USB_TYPEC_TBT_SID) break;
plug->desc = "Thunderbolt3"; diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index 4b1668733a4b..511dd1b224ae 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -1433,11 +1433,10 @@ static int ucsi_ccg_probe(struct i2c_client *client) uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA; else if (!strcmp(fw_name, "nvidia,gpu")) uc->fw_build = CCG_FW_BUILD_NVIDIA; + if (!uc->fw_build) + dev_err(uc->dev, "failed to get FW build information\n"); }
- if (!uc->fw_build) - dev_err(uc->dev, "failed to get FW build information\n"); - /* reset ccg device and initialize ucsi */ status = ucsi_ccg_init(uc); if (status < 0) { diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 718fa4e0b31e..7aeff435c1d8 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1699,14 +1699,19 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } }
+ if (vs->vs_tpg) { + pr_err("vhost-scsi endpoint already set for %s.\n", + vs->vs_vhost_wwpn); + ret = -EEXIST; + goto out; + } + len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; vs_tpg = kzalloc(len, GFP_KERNEL); if (!vs_tpg) { ret = -ENOMEM; goto out; } - if (vs->vs_tpg) - memcpy(vs_tpg, vs->vs_tpg, len);
mutex_lock(&vhost_scsi_mutex); list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { @@ -1722,12 +1727,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, tv_tport = tpg->tport;
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { - if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { - mutex_unlock(&tpg->tv_tpg_mutex); - mutex_unlock(&vhost_scsi_mutex); - ret = -EEXIST; - goto undepend; - } /* * In order to ensure individual vhost-scsi configfs * groups cannot be removed while in use by vhost ioctl, @@ -1774,15 +1773,15 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } ret = 0; } else { - ret = -EEXIST; + ret = -ENODEV; + goto free_tpg; }
/* - * Act as synchronize_rcu to make sure access to - * old vs->vs_tpg is finished. + * Act as synchronize_rcu to make sure requests after this point + * see a fully set up device. */ vhost_scsi_flush(vs); - kfree(vs->vs_tpg); vs->vs_tpg = vs_tpg; goto out;
@@ -1802,6 +1801,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, target_undepend_item(&tpg->se_tpg.tpg_group.cg_item); } } +free_tpg: kfree(vs_tpg); out: mutex_unlock(&vs->dev.mutex); @@ -1904,6 +1904,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vhost_scsi_flush(vs); kfree(vs->vs_tpg); vs->vs_tpg = NULL; + memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn)); WARN_ON(vs->vs_events_nr); mutex_unlock(&vs->dev.mutex); return 0; diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index bc31db6ef7d2..3e9f2bda6702 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -24,7 +24,7 @@ config VGA_CONSOLE Say Y.
config MDA_CONSOLE - depends on !M68K && !PARISC && ISA + depends on VGA_CONSOLE && ISA tristate "MDA text console (dual-headed)" help Say Y here if you have an old MDA or monochrome Hercules graphics @@ -52,7 +52,7 @@ config DUMMY_CONSOLE
config DUMMY_CONSOLE_COLUMNS int "Initial number of console screen columns" - depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE + depends on DUMMY_CONSOLE && !(ARCH_FOOTBRIDGE && VGA_CONSOLE) default 160 if PARISC default 80 help @@ -62,7 +62,7 @@ config DUMMY_CONSOLE_COLUMNS
config DUMMY_CONSOLE_ROWS int "Initial number of console screen rows" - depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE + depends on DUMMY_CONSOLE && !(ARCH_FOOTBRIDGE && VGA_CONSOLE) default 64 if PARISC default 30 if ARM default 25 diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 840f22160763..6251a6b07b3a 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -137,13 +137,15 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) */ int au1100fb_setmode(struct au1100fb_device *fbdev) { - struct fb_info *info = &fbdev->info; + struct fb_info *info; u32 words; int index;
if (!fbdev) return -EINVAL;
+ info = &fbdev->info; + /* Update var-dependent FB info */ if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) { if (info->var.bits_per_pixel <= 8) { diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index 7734377b2d87..ed6f4f43e2d5 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -327,6 +327,13 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, if (var->xres_virtual > 4096 || var->yres_virtual > 2048) return -EINVAL;
+ /* geometry sanity checks */ + if (var->xres + var->xoffset > var->xres_virtual) + return -EINVAL; + + if (var->yres + var->yoffset > var->yres_virtual) + return -EINVAL; + /* can cope with 8,16 or 32bpp */
if (var->bits_per_pixel <= 8) diff --git a/drivers/w1/masters/w1-uart.c b/drivers/w1/masters/w1-uart.c index a31782e56ba7..c87eea347806 100644 --- a/drivers/w1/masters/w1-uart.c +++ b/drivers/w1/masters/w1-uart.c @@ -372,11 +372,11 @@ static int w1_uart_probe(struct serdev_device *serdev) init_completion(&w1dev->rx_byte_received); mutex_init(&w1dev->rx_mutex);
+ serdev_device_set_drvdata(serdev, w1dev); + serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops); ret = w1_uart_serdev_open(w1dev); if (ret < 0) return ret; - serdev_device_set_drvdata(serdev, w1dev); - serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops);
return w1_add_master_device(&w1dev->bus); } diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 143ac03b7425..3397939fd2d5 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -407,8 +407,8 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap, err); goto error; } - v9fs_fid_add(dentry, &fid); v9fs_set_create_acl(inode, fid, dacl, pacl); + v9fs_fid_add(dentry, &fid); d_instantiate(dentry, inode); err = 0; inc_nlink(dir); diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h index 77c7991d89aa..23cea74f9933 100644 --- a/fs/autofs/autofs_i.h +++ b/fs/autofs/autofs_i.h @@ -218,6 +218,8 @@ void autofs_clean_ino(struct autofs_info *);
static inline int autofs_check_pipe(struct file *pipe) { + if (pipe->f_mode & FMODE_PATH) + return -EINVAL; if (!(pipe->f_mode & FMODE_CAN_WRITE)) return -EINVAL; if (!S_ISFIFO(file_inode(pipe)->i_mode)) diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c index 15725b4ce393..4d6193820483 100644 --- a/fs/bcachefs/fs-ioctl.c +++ b/fs/bcachefs/fs-ioctl.c @@ -515,10 +515,12 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp, ret = -ENOENT; goto err; } - ret = __bch2_unlink(dir, victim, true); + + ret = inode_permission(file_mnt_idmap(filp), d_inode(victim), MAY_WRITE) ?: + __bch2_unlink(dir, victim, true); if (!ret) { fsnotify_rmdir(dir, victim); - d_delete(victim); + d_invalidate(victim); } err: inode_unlock(dir); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index c0a8f7d92acc..b96b23594334 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1823,7 +1823,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); while (!list_empty(&fs_info->reclaim_bgs)) { u64 zone_unusable; - u64 reclaimed; + u64 used; + u64 reserved; int ret = 0;
bg = list_first_entry(&fs_info->reclaim_bgs, @@ -1915,19 +1916,42 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) if (ret < 0) goto next;
+ /* + * The amount of bytes reclaimed corresponds to the sum of the + * "used" and "reserved" counters. We have set the block group + * to RO above, which prevents reservations from happening but + * we may have existing reservations for which allocation has + * not yet been done - btrfs_update_block_group() was not yet + * called, which is where we will transfer a reserved extent's + * size from the "reserved" counter to the "used" counter - this + * happens when running delayed references. When we relocate the + * chunk below, relocation first flushes delalloc, waits for + * ordered extent completion (which is where we create delayed + * references for data extents) and commits the current + * transaction (which runs delayed references), and only after + * it does the actual work to move extents out of the block + * group. So the reported amount of reclaimed bytes is + * effectively the sum of the 'used' and 'reserved' counters. + */ + spin_lock(&bg->lock); + used = bg->used; + reserved = bg->reserved; + spin_unlock(&bg->lock); + btrfs_info(fs_info, - "reclaiming chunk %llu with %llu%% used %llu%% unusable", + "reclaiming chunk %llu with %llu%% used %llu%% reserved %llu%% unusable", bg->start, - div64_u64(bg->used * 100, bg->length), + div64_u64(used * 100, bg->length), + div64_u64(reserved * 100, bg->length), div64_u64(zone_unusable * 100, bg->length)); trace_btrfs_reclaim_block_group(bg); - reclaimed = bg->used; ret = btrfs_relocate_chunk(fs_info, bg->start); if (ret) { btrfs_dec_block_group_ro(bg); btrfs_err(fs_info, "error relocating chunk %llu", bg->start); - reclaimed = 0; + used = 0; + reserved = 0; spin_lock(&space_info->lock); space_info->reclaim_errors++; if (READ_ONCE(space_info->periodic_reclaim)) @@ -1936,7 +1960,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) } spin_lock(&space_info->lock); space_info->reclaim_count++; - space_info->reclaim_bytes += reclaimed; + space_info->reclaim_bytes += used; + space_info->reclaim_bytes += reserved; spin_unlock(&space_info->lock);
next: @@ -2771,8 +2796,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) /* Already aborted the transaction if it failed. */ next: btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); + + spin_lock(&fs_info->unused_bgs_lock); list_del_init(&block_group->bg_list); clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); + spin_unlock(&fs_info->unused_bgs_lock);
/* * If the block group is still unused, add it to the list of diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f09db62e61a1..70b61bc237e9 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2561,6 +2561,9 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info, ret = -EINVAL; }
+ if (ret) + return ret; + ret = validate_sys_chunk_array(fs_info, sb);
/* diff --git a/fs/coredump.c b/fs/coredump.c index 4375c70144d0..4ebec51fe4f2 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -1016,7 +1016,9 @@ static const struct ctl_table coredump_sysctls[] = { .data = &core_pipe_limit, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, { .procname = "core_file_note_size_limit", diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 8afac6e2dff0..1929327ffbe1 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -576,7 +576,7 @@ static int new_lockspace(const char *name, const char *cluster, lockspace to start running (via sysfs) in dlm_ls_start(). */
error = do_uevent(ls, 1); - if (error) + if (error < 0) goto out_recoverd;
/* wait until recovery is successful or failed */ diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 686d835eb533..efd25f3101f1 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -152,8 +152,6 @@ struct erofs_sb_info { /* used for statfs, f_files - f_favail */ u64 inos;
- u8 uuid[16]; /* 128-bit uuid for volume */ - u8 volume_name[16]; /* volume name */ u32 feature_compat; u32 feature_incompat;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 827b62665649..9f2bce5af9c8 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -317,14 +317,6 @@ static int erofs_read_superblock(struct super_block *sb)
super_set_uuid(sb, (void *)dsb->uuid, sizeof(dsb->uuid));
- ret = strscpy(sbi->volume_name, dsb->volume_name, - sizeof(dsb->volume_name)); - if (ret < 0) { /* -E2BIG */ - erofs_err(sb, "bad volume name without NIL terminator"); - ret = -EFSCORRUPTED; - goto out; - } - /* parse on-disk compression configurations */ ret = z_erofs_parse_cfgs(sb, dsb); if (ret < 0) diff --git a/fs/exec.c b/fs/exec.c index 506cd411f4ac..17047210be46 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1229,13 +1229,12 @@ int begin_new_exec(struct linux_binprm * bprm) */ bprm->point_of_no_return = true;
- /* - * Make this the only thread in the thread group. - */ + /* Make this the only thread in the thread group */ retval = de_thread(me); if (retval) goto out; - + /* see the comment in check_unsafe_exec() */ + current->fs->in_exec = 0; /* * Cancel any io_uring activity across execve */ @@ -1497,6 +1496,8 @@ static void free_bprm(struct linux_binprm *bprm) } free_arg_pages(bprm); if (bprm->cred) { + /* in case exec fails before de_thread() succeeds */ + current->fs->in_exec = 0; mutex_unlock(&current->signal->cred_guard_mutex); abort_creds(bprm->cred); } @@ -1618,6 +1619,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm) * suid exec because the differently privileged task * will be able to manipulate the current directory, etc. * It would be nice to force an unshare instead... + * + * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS) + * from another sub-thread until de_thread() succeeds; this + * state is protected by the cred_guard_mutex we hold. + */ n_fs = 1; spin_lock(&p->fs->lock); @@ -1862,7 +1867,6 @@ static int bprm_execve(struct linux_binprm *bprm)
sched_mm_cid_after_execve(current); /* execve succeeded */ - current->fs->in_exec = 0; current->in_execve = 0; rseq_execve(current); user_events_execve(current); @@ -1881,7 +1885,6 @@ static int bprm_execve(struct linux_binprm *bprm) force_fatal_sig(SIGSEGV);
sched_mm_cid_after_execve(current); - current->fs->in_exec = 0; current->in_execve = 0;
return retval; diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 6f3651c6ca91..8df5ad6ebb10 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -265,7 +265,7 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain, clu = next; if (exfat_ent_get(sb, clu, &next)) return -EIO; - } while (next != EXFAT_EOF_CLUSTER); + } while (next != EXFAT_EOF_CLUSTER && count <= p_chain->size);
if (p_chain->size != count) { exfat_fs_error(sb, diff --git a/fs/exfat/file.c b/fs/exfat/file.c index 807349d8ea05..841a5b18e3df 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -582,6 +582,9 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) loff_t pos = iocb->ki_pos; loff_t valid_size;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb))) + return -EIO; + inode_lock(inode);
valid_size = ei->valid_size; @@ -635,6 +638,16 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) return ret; }
+static ssize_t exfat_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + if (unlikely(exfat_forced_shutdown(inode->i_sb))) + return -EIO; + + return generic_file_read_iter(iocb, iter); +} + static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf) { int err; @@ -672,14 +685,26 @@ static const struct vm_operations_struct exfat_file_vm_ops = {
static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma) { + if (unlikely(exfat_forced_shutdown(file_inode(file)->i_sb))) + return -EIO; + file_accessed(file); vma->vm_ops = &exfat_file_vm_ops; return 0; }
+static ssize_t exfat_splice_read(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, unsigned int flags) +{ + if (unlikely(exfat_forced_shutdown(file_inode(in)->i_sb))) + return -EIO; + + return filemap_splice_read(in, ppos, pipe, len, flags); +} + const struct file_operations exfat_file_operations = { .llseek = generic_file_llseek, - .read_iter = generic_file_read_iter, + .read_iter = exfat_file_read_iter, .write_iter = exfat_file_write_iter, .unlocked_ioctl = exfat_ioctl, #ifdef CONFIG_COMPAT @@ -687,7 +712,7 @@ const struct file_operations exfat_file_operations = { #endif .mmap = exfat_file_mmap, .fsync = exfat_file_fsync, - .splice_read = filemap_splice_read, + .splice_read = exfat_splice_read, .splice_write = iter_file_splice_write, };
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 96952d4acb50..a23677de4544 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -344,7 +344,8 @@ static int exfat_get_block(struct inode *inode, sector_t iblock, * The block has been partially written, * zero the unwritten part and map the block. */ - loff_t size, off, pos; + loff_t size, pos; + void *addr;
max_blocks = 1;
@@ -355,17 +356,43 @@ static int exfat_get_block(struct inode *inode, sector_t iblock, if (!bh_result->b_folio) goto done;
+ /* + * No buffer_head is allocated. + * (1) bmap: It's enough to fill bh_result without I/O. + * (2) read: The unwritten part should be filled with 0 + * If a folio does not have any buffers, + * let's return -EAGAIN to fall back to + * per-bh IO like block_read_full_folio(). + */ + if (!folio_buffers(bh_result->b_folio)) { + err = -EAGAIN; + goto done; + } + pos = EXFAT_BLK_TO_B(iblock, sb); size = ei->valid_size - pos; - off = pos & (PAGE_SIZE - 1); + addr = folio_address(bh_result->b_folio) + + offset_in_folio(bh_result->b_folio, pos); + + /* Check if bh->b_data points to proper addr in folio */ + if (bh_result->b_data != addr) { + exfat_fs_error_ratelimit(sb, + "b_data(%p) != folio_addr(%p)", + bh_result->b_data, addr); + err = -EINVAL; + goto done; + }
- folio_set_bh(bh_result, bh_result->b_folio, off); + /* Read a block */ err = bh_read(bh_result, 0); if (err < 0) - goto unlock_ret; + goto done; + + /* Zero unwritten part of a block */ + memset(bh_result->b_data + size, 0, + bh_result->b_size - size);
- folio_zero_segment(bh_result->b_folio, off + size, - off + sb->s_blocksize); + err = 0; } else { /* * The range has not been written, clear the mapped flag @@ -376,6 +403,8 @@ static int exfat_get_block(struct inode *inode, sector_t iblock, } done: bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb); + if (err < 0) + clear_buffer_mapped(bh_result); unlock_ret: mutex_unlock(&sbi->s_lock); return err; diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 02d47a64e8d1..253992fcf57c 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -104,6 +104,9 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; + else if (unlikely(next_offset == size && de->name_len == 1 && + de->name[0] == '.')) + error_msg = "'.' directory cannot be the last in data block"; else return 0;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4e7de7eaa374..df30d9f23512 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1821,7 +1821,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) */ enum { EXT4_MF_MNTDIR_SAMPLED, - EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */ + EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */ + EXT4_MF_JOURNAL_DESTROY /* Journal is in process of destroying */ };
static inline void ext4_set_mount_flag(struct super_block *sb, int bit) @@ -2232,15 +2233,23 @@ extern int ext4_feature_set_ok(struct super_block *sb, int readonly); /* * Superblock flags */ -#define EXT4_FLAGS_RESIZING 0 -#define EXT4_FLAGS_SHUTDOWN 1 -#define EXT4_FLAGS_BDEV_IS_DAX 2 +enum { + EXT4_FLAGS_RESIZING, /* Avoid superblock update and resize race */ + EXT4_FLAGS_SHUTDOWN, /* Prevent access to the file system */ + EXT4_FLAGS_BDEV_IS_DAX, /* Current block device support DAX */ + EXT4_FLAGS_EMERGENCY_RO,/* Emergency read-only due to fs errors */ +};
static inline int ext4_forced_shutdown(struct super_block *sb) { return test_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags); }
+static inline int ext4_emergency_ro(struct super_block *sb) +{ + return test_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags); +} + /* * Default values for user and/or group using reserved blocks */ diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index 0c77697d5e90..ada46189b086 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -513,4 +513,33 @@ static inline int ext4_should_dioread_nolock(struct inode *inode) return 1; }
+/* + * Pass journal explicitly as it may not be cached in the sbi->s_journal in some + * cases + */ +static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal) +{ + int err = 0; + + /* + * At this point only two things can be operating on the journal. + * JBD2 thread performing transaction commit and s_sb_upd_work + * issuing sb update through the journal. Once we set + * EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not + * queue s_sb_upd_work and ext4_force_commit() makes sure any + * ext4_handle_error() calls from the running transaction commit are + * finished. Hence no new s_sb_upd_work can be queued after we + * flush it here. + */ + ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY); + + ext4_force_commit(sbi->s_sb); + flush_work(&sbi->s_sb_upd_work); + + err = jbd2_journal_destroy(journal); + sbi->s_journal = NULL; + + return err; +} + #endif /* _EXT4_JBD2_H */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7c54ae5fcbd4..4009f9017a0e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4674,6 +4674,11 @@ static inline int ext4_iget_extra_inode(struct inode *inode, *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { int err;
+ err = xattr_check_inode(inode, IHDR(inode, raw_inode), + ITAIL(inode, raw_inode)); + if (err) + return err; + ext4_set_inode_state(inode, EXT4_STATE_XATTR); err = ext4_find_inline_data_nolock(inode); if (!err && ext4_has_inline_data(inode)) @@ -5007,8 +5012,16 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, inode->i_op = &ext4_encrypted_symlink_inode_operations; } else if (ext4_inode_is_fast_symlink(inode)) { inode->i_op = &ext4_fast_symlink_inode_operations; - nd_terminate_link(ei->i_data, inode->i_size, - sizeof(ei->i_data) - 1); + if (inode->i_size == 0 || + inode->i_size >= sizeof(ei->i_data) || + strnlen((char *)ei->i_data, inode->i_size + 1) != + inode->i_size) { + ext4_error_inode(inode, function, line, 0, + "invalid fast symlink length %llu", + (unsigned long long)inode->i_size); + ret = -EFSCORRUPTED; + goto bad_inode; + } inode_set_cached_link(inode, (char *)ei->i_data, inode->i_size); } else { @@ -5464,7 +5477,7 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry, oldsize & (inode->i_sb->s_blocksize - 1)) { error = ext4_inode_attach_jinode(inode); if (error) - goto err_out; + goto out_mmap_sem; }
handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); diff --git a/fs/ext4/mballoc-test.c b/fs/ext4/mballoc-test.c index bb2a223b207c..d634c12f1984 100644 --- a/fs/ext4/mballoc-test.c +++ b/fs/ext4/mballoc-test.c @@ -796,6 +796,7 @@ static void test_mb_mark_used(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy); grp = kunit_kzalloc(test, offsetof(struct ext4_group_info, bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b); KUNIT_ASSERT_EQ(test, ret, 0); @@ -860,6 +861,7 @@ static void test_mb_free_blocks(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy); grp = kunit_kzalloc(test, offsetof(struct ext4_group_info, bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b); KUNIT_ASSERT_EQ(test, ret, 0); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 536d56d15072..8e49cb711858 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2577,8 +2577,10 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, sb, frame->bh, EXT4_JTR_NONE); - if (err) + if (err) { + brelse(bh2); goto journal_error; + } if (!add_level) { unsigned icount1 = icount/2, icount2 = icount - icount1; unsigned hash2 = dx_get_hash(entries + icount1); @@ -2589,8 +2591,10 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, err = ext4_journal_get_write_access(handle, sb, (frame - 1)->bh, EXT4_JTR_NONE); - if (err) + if (err) { + brelse(bh2); goto journal_error; + }
memcpy((char *) entries2, (char *) (entries + icount1), icount2 * sizeof(struct dx_entry)); @@ -2609,8 +2613,10 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, dxtrace(dx_show_index("node", ((struct dx_node *) bh2->b_data)->entries)); err = ext4_handle_dirty_dx_node(handle, dir, bh2); - if (err) + if (err) { + brelse(bh2); goto journal_error; + } brelse (bh2); err = ext4_handle_dirty_dx_node(handle, dir, (frame - 1)->bh); @@ -2635,8 +2641,10 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, "Creating %d level index...\n", dxroot->info.indirect_levels)); err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); - if (err) + if (err) { + brelse(bh2); goto journal_error; + } err = ext4_handle_dirty_dx_node(handle, dir, bh2); brelse(bh2); restart = 1; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index a50e5c31b937..dc46a7063f1e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -707,11 +707,8 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error, if (test_opt(sb, WARN_ON_ERROR)) WARN_ON_ONCE(1);
- if (!continue_fs && !sb_rdonly(sb)) { - set_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags); - if (journal) - jbd2_journal_abort(journal, -EIO); - } + if (!continue_fs && !ext4_emergency_ro(sb) && journal) + jbd2_journal_abort(journal, -EIO);
if (!bdev_read_only(sb->s_bdev)) { save_error_info(sb, error, ino, block, func, line); @@ -719,9 +716,13 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error, * In case the fs should keep running, we need to writeout * superblock through the journal. Due to lock ordering * constraints, it may not be safe to do it right here so we - * defer superblock flushing to a workqueue. + * defer superblock flushing to a workqueue. We just need to be + * careful when the journal is already shutting down. If we get + * here in that case, just update the sb directly as the last + * transaction won't commit anyway. */ - if (continue_fs && journal) + if (continue_fs && journal && + !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY)) schedule_work(&EXT4_SB(sb)->s_sb_upd_work); else ext4_commit_super(sb); @@ -737,17 +738,17 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error, sb->s_id); }
- if (sb_rdonly(sb) || continue_fs) + if (ext4_emergency_ro(sb) || continue_fs) return;
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); /* - * EXT4_FLAGS_SHUTDOWN was set which stops all filesystem - * modifications. We don't set SB_RDONLY because that requires - * sb->s_umount semaphore and setting it without proper remount - * procedure is confusing code such as freeze_super() leading to - * deadlocks and other problems. + * We don't set SB_RDONLY because that requires sb->s_umount + * semaphore and setting it without proper remount procedure is + * confusing code such as freeze_super() leading to deadlocks + * and other problems. */ + set_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags); }
static void update_super_work(struct work_struct *work) @@ -1306,18 +1307,17 @@ static void ext4_put_super(struct super_block *sb) ext4_unregister_li_request(sb); ext4_quotas_off(sb, EXT4_MAXQUOTAS);
- flush_work(&sbi->s_sb_upd_work); destroy_workqueue(sbi->rsv_conversion_wq); ext4_release_orphan_info(sb);
if (sbi->s_journal) { aborted = is_journal_aborted(sbi->s_journal); - err = jbd2_journal_destroy(sbi->s_journal); - sbi->s_journal = NULL; + err = ext4_journal_destroy(sbi, sbi->s_journal); if ((err < 0) && !aborted) { ext4_abort(sb, -err, "Couldn't clean up the journal"); } - } + } else + flush_work(&sbi->s_sb_upd_work);
ext4_es_unregister_shrinker(sbi); timer_shutdown_sync(&sbi->s_err_report); @@ -3038,6 +3038,9 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, if (nodefs && !test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) SEQ_OPTS_PUTS("prefetch_block_bitmaps");
+ if (ext4_emergency_ro(sb)) + SEQ_OPTS_PUTS("emergency_ro"); + ext4_show_quota_options(seq, sb); return 0; } @@ -4973,10 +4976,7 @@ static int ext4_load_and_init_journal(struct super_block *sb, return 0;
out: - /* flush s_sb_upd_work before destroying the journal. */ - flush_work(&sbi->s_sb_upd_work); - jbd2_journal_destroy(sbi->s_journal); - sbi->s_journal = NULL; + ext4_journal_destroy(sbi, sbi->s_journal); return -EINVAL; }
@@ -5665,10 +5665,7 @@ failed_mount8: __maybe_unused sbi->s_ea_block_cache = NULL;
if (sbi->s_journal) { - /* flush s_sb_upd_work before journal destroy. */ - flush_work(&sbi->s_sb_upd_work); - jbd2_journal_destroy(sbi->s_journal); - sbi->s_journal = NULL; + ext4_journal_destroy(sbi, sbi->s_journal); } failed_mount3a: ext4_es_unregister_shrinker(sbi); @@ -5973,7 +5970,7 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb, return journal;
out_journal: - jbd2_journal_destroy(journal); + ext4_journal_destroy(EXT4_SB(sb), journal); out_bdev: bdev_fput(bdev_file); return ERR_PTR(errno); @@ -6090,8 +6087,7 @@ static int ext4_load_journal(struct super_block *sb, EXT4_SB(sb)->s_journal = journal; err = ext4_clear_journal_err(sb, es); if (err) { - EXT4_SB(sb)->s_journal = NULL; - jbd2_journal_destroy(journal); + ext4_journal_destroy(EXT4_SB(sb), journal); return err; }
@@ -6109,7 +6105,7 @@ static int ext4_load_journal(struct super_block *sb, return 0;
err_out: - jbd2_journal_destroy(journal); + ext4_journal_destroy(EXT4_SB(sb), journal); return err; }
@@ -6817,22 +6813,29 @@ static int ext4_statfs_project(struct super_block *sb, dquot->dq_dqb.dqb_bhardlimit); limit >>= sb->s_blocksize_bits;
- if (limit && buf->f_blocks > limit) { + if (limit) { + uint64_t remaining = 0; + curblock = (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; - buf->f_blocks = limit; - buf->f_bfree = buf->f_bavail = - (buf->f_blocks > curblock) ? - (buf->f_blocks - curblock) : 0; + if (limit > curblock) + remaining = limit - curblock; + + buf->f_blocks = min(buf->f_blocks, limit); + buf->f_bfree = min(buf->f_bfree, remaining); + buf->f_bavail = min(buf->f_bavail, remaining); }
limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, dquot->dq_dqb.dqb_ihardlimit); - if (limit && buf->f_files > limit) { - buf->f_files = limit; - buf->f_ffree = - (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? - (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; + if (limit) { + uint64_t remaining = 0; + + if (limit > dquot->dq_dqb.dqb_curinodes) + remaining = limit - dquot->dq_dqb.dqb_curinodes; + + buf->f_files = min(buf->f_files, limit); + buf->f_ffree = min(buf->f_ffree, remaining); }
spin_unlock(&dquot->dq_dqb_lock); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 7647e9f6e190..a10fb8a9d02d 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -308,7 +308,7 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh, __ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
-static inline int +int __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header, void *end, const char *function, unsigned int line) { @@ -316,9 +316,6 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header, function, line); }
-#define xattr_check_inode(inode, header, end) \ - __xattr_check_inode((inode), (header), (end), __func__, __LINE__) - static int xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry, void *end, int name_index, const char *name, int sorted) @@ -649,10 +646,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, return error; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); - end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; - error = xattr_check_inode(inode, header, end); - if (error) - goto cleanup; + end = ITAIL(inode, raw_inode); entry = IFIRST(header); error = xattr_find_entry(inode, &entry, end, name_index, name, 0); if (error) @@ -783,7 +777,6 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) struct ext4_xattr_ibody_header *header; struct ext4_inode *raw_inode; struct ext4_iloc iloc; - void *end; int error;
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) @@ -793,14 +786,9 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) return error; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); - end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; - error = xattr_check_inode(inode, header, end); - if (error) - goto cleanup; error = ext4_xattr_list_entries(dentry, IFIRST(header), buffer, buffer_size);
-cleanup: brelse(iloc.bh); return error; } @@ -868,7 +856,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage) struct ext4_xattr_ibody_header *header; struct ext4_xattr_entry *entry; qsize_t ea_inode_refs = 0; - void *end; int ret;
lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem); @@ -879,10 +866,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage) goto out; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); - end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; - ret = xattr_check_inode(inode, header, end); - if (ret) - goto out;
for (entry = IFIRST(header); !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) @@ -2235,11 +2218,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, header = IHDR(inode, raw_inode); is->s.base = is->s.first = IFIRST(header); is->s.here = is->s.first; - is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; + is->s.end = ITAIL(inode, raw_inode); if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { - error = xattr_check_inode(inode, header, is->s.end); - if (error) - return error; /* Find the named attribute. */ error = xattr_find_entry(inode, &is->s.here, is->s.end, i->name_index, i->name, 0); @@ -2786,14 +2766,10 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, */
base = IFIRST(header); - end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; + end = ITAIL(inode, raw_inode); min_offs = end - base; total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
- error = xattr_check_inode(inode, header, end); - if (error) - goto cleanup; - ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino); if (ifree >= isize_diff) goto shift; diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index b25c2d7b5f99..1fedf44d4fb6 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -67,6 +67,9 @@ struct ext4_xattr_entry { ((void *)raw_inode + \ EXT4_GOOD_OLD_INODE_SIZE + \ EXT4_I(inode)->i_extra_isize)) +#define ITAIL(inode, raw_inode) \ + ((void *)(raw_inode) + \ + EXT4_SB((inode)->i_sb)->s_inode_size) #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
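The new ITAIL() macro gives the end of the in-inode extended attribute area (start of the raw inode plus the on-disk inode size), so entry walks bound themselves against the inode tail instead of re-running the full body check on every lookup. A rough standalone sketch of that bounds idea, using invented structures that only illustrate the pointer arithmetic:

#include <stdio.h>
#include <string.h>

/* Hypothetical, much-simplified stand-ins for the on-disk structures. */
struct fake_inode { char fixed[160]; char in_inode_xattrs[96]; };
struct fake_entry { unsigned char name_len; char name[15]; };

int main(void)
{
	struct fake_inode raw;
	size_t inode_size = sizeof(raw);		/* s_inode_size stand-in */
	char *end = (char *)&raw + inode_size;		/* what ITAIL() computes */
	char *p = raw.in_inode_xattrs;

	memset(&raw, 0, sizeof(raw));

	/* Walk fake entries, refusing to read past the tail of the inode. */
	while (p + sizeof(struct fake_entry) <= end) {
		struct fake_entry *e = (struct fake_entry *)p;

		if (e->name_len == 0)
			break;				/* end-of-list marker */
		p += sizeof(*e);
	}
	printf("stopped %zu bytes before the inode tail\n", (size_t)(end - p));
	return 0;
}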
/* @@ -206,6 +209,13 @@ extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, extern struct mb_cache *ext4_xattr_create_cache(void); extern void ext4_xattr_destroy_cache(struct mb_cache *);
+extern int +__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header, + void *end, const char *function, unsigned int line); + +#define xattr_check_inode(inode, header, end) \ + __xattr_check_inode((inode), (header), (end), __func__, __LINE__) + #ifdef CONFIG_EXT4_FS_SECURITY extern int ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir, const struct qstr *qstr); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index efda9a022981..bd890738b94d 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1237,7 +1237,7 @@ static int block_operations(struct f2fs_sb_info *sbi) retry_flush_quotas: f2fs_lock_all(sbi); if (__need_flush_quota(sbi)) { - int locked; + bool need_lock = sbi->umount_lock_holder != current;
if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) { set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); @@ -1246,11 +1246,13 @@ static int block_operations(struct f2fs_sb_info *sbi) } f2fs_unlock_all(sbi);
- /* only failed during mount/umount/freeze/quotactl */ - locked = down_read_trylock(&sbi->sb->s_umount); - f2fs_quota_sync(sbi->sb, -1); - if (locked) + /* don't grab s_umount lock during mount/umount/remount/freeze/quotactl */ + if (!need_lock) { + f2fs_do_quota_sync(sbi->sb, -1); + } else if (down_read_trylock(&sbi->sb->s_umount)) { + f2fs_do_quota_sync(sbi->sb, -1); up_read(&sbi->sb->s_umount); + } cond_resched(); goto retry_flush_quotas; } @@ -1867,7 +1869,8 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi) struct cp_control cpc;
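The checkpoint change avoids re-taking s_umount when the task already holds it during mount/umount/remount/freeze/quotactl: the new umount_lock_holder field records the holder, and the quota flush either runs without the lock for that task or only under a successful trylock. A simplified pthread sketch of the pattern, with hypothetical names that are not the f2fs code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the superblock lock and holder tracking. */
static pthread_rwlock_t s_umount = PTHREAD_RWLOCK_INITIALIZER;
static pthread_t umount_lock_holder;
static bool holder_valid;

static void do_quota_sync(void)
{
	puts("quota sync");
}

/* Sync quotas, but never re-acquire s_umount if this thread already holds it. */
static void quota_sync_safe(void)
{
	bool need_lock = !(holder_valid &&
			   pthread_equal(umount_lock_holder, pthread_self()));

	if (!need_lock) {
		do_quota_sync();
	} else if (pthread_rwlock_tryrdlock(&s_umount) == 0) {
		do_quota_sync();
		pthread_rwlock_unlock(&s_umount);
	}
	/* else: somebody holds it for write; retry later, as the caller does. */
}

int main(void)
{
	/* Simulate the mount/umount path: mark ourselves as the holder first. */
	umount_lock_holder = pthread_self();
	holder_valid = true;
	quota_sync_safe();

	holder_valid = false;
	quota_sync_safe();
	return 0;
}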
cpc.reason = __get_cp_reason(sbi); - if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) { + if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC || + sbi->umount_lock_holder == current) { int ret;
f2fs_down_write(&sbi->gc_lock); diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 985690d81a82..9b94810675c1 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -1150,6 +1150,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, f2fs_compress_ctx_add_page(cc, page_folio(page));
if (!PageUptodate(page)) { + f2fs_handle_page_eio(sbi, page_folio(page), DATA); release_and_retry: f2fs_put_rpages(cc); f2fs_unlock_rpages(cc, i + 1); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index de4da6d9cd93..8440a1ed24f2 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2178,6 +2178,12 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, int i; int ret = 0;
+ if (unlikely(f2fs_cp_error(sbi))) { + ret = -EIO; + from_dnode = false; + goto out_put_dnode; + } + f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) + @@ -2221,10 +2227,6 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, if (ret) goto out;
- if (unlikely(f2fs_cp_error(sbi))) { - ret = -EIO; - goto out_put_dnode; - } f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
skip_reading_dnode: diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1afa7be16e7d..493dda2d4b66 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1659,6 +1659,7 @@ struct f2fs_sb_info {
unsigned int nquota_files; /* # of quota sysfile */ struct f2fs_rwsem quota_sem; /* blocking cp for flags */ + struct task_struct *umount_lock_holder; /* s_umount lock holder */
/* # of pages, see count_type */ atomic_t nr_pages[NR_COUNT_TYPE]; @@ -3624,7 +3625,7 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync); void f2fs_inode_synced(struct inode *inode); int f2fs_dquot_initialize(struct inode *inode); int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); -int f2fs_quota_sync(struct super_block *sb, int type); +int f2fs_do_quota_sync(struct super_block *sb, int type); loff_t max_file_blocks(struct inode *inode); void f2fs_quota_off_umount(struct super_block *sb); void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index f92a9fba9991..44a658662462 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1834,18 +1834,32 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
map.m_len = sec_blks; next_alloc: + f2fs_down_write(&sbi->pin_sem); + + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { + if (has_not_enough_free_secs(sbi, 0, 0)) { + f2fs_up_write(&sbi->pin_sem); + err = -ENOSPC; + f2fs_warn_ratelimited(sbi, + "ino:%lu, start:%lu, end:%lu, need to trigger GC to " + "reclaim enough free segment when checkpoint is enabled", + inode->i_ino, pg_start, pg_end); + goto out_err; + } + } + if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ? ZONED_PIN_SEC_REQUIRED_COUNT : GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { f2fs_down_write(&sbi->gc_lock); stat_inc_gc_call_count(sbi, FOREGROUND); err = f2fs_gc(sbi, &gc_control); - if (err && err != -ENODATA) + if (err && err != -ENODATA) { + f2fs_up_write(&sbi->pin_sem); goto out_err; + } }
- f2fs_down_write(&sbi->pin_sem); - err = f2fs_allocate_pinning_section(sbi); if (err) { f2fs_up_write(&sbi->pin_sem); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 3dd25f64d6f1..cd17d6f4c291 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -789,6 +789,13 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) !is_inode_flag_set(inode, FI_DIRTY_INODE)) return 0;
+ /* + * no need to update inode page, ultimately f2fs_evict_inode() will + * clear dirty status of inode. + */ + if (f2fs_cp_error(sbi)) + return -EIO; + if (!f2fs_is_checkpoint_ready(sbi)) { f2fs_mark_inode_dirty_sync(inode, true); return -ENOSPC; diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index a278c7da8177..3d85d8116dae 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -502,6 +502,14 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, goto out; }
+ if (inode->i_nlink == 0) { + f2fs_warn(F2FS_I_SB(inode), "%s: inode (ino=%lx) has zero i_nlink", + __func__, inode->i_ino); + err = -EFSCORRUPTED; + set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); + goto out_iput; + } + if (IS_ENCRYPTED(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && !fscrypt_has_permitted_context(dir, inode)) { diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index c282e8a0a2ec..384bca002ec9 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -2096,7 +2096,9 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, return false;
if (!force) { - if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks || + if (!f2fs_realtime_discard_enable(sbi) || + (!se->valid_blocks && + !IS_CURSEG(sbi, cpc->trim_start)) || SM_I(sbi)->dcc_info->nr_discards >= SM_I(sbi)->dcc_info->max_discards) return false; @@ -2320,10 +2322,9 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi) dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY; dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY; dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE; - if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) + if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT || + F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) dcc->discard_granularity = BLKS_PER_SEG(sbi); - else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) - dcc->discard_granularity = BLKS_PER_SEC(sbi);
INIT_LIST_HEAD(&dcc->entry_list); for (i = 0; i < MAX_PLIST_NUM; i++) @@ -2806,7 +2807,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi, MAIN_SECS(sbi)); if (secno >= MAIN_SECS(sbi)) { ret = -ENOSPC; - f2fs_bug_on(sbi, 1); + f2fs_bug_on(sbi, !pinning); goto out_unlock; } } @@ -2848,7 +2849,7 @@ static int get_new_segment(struct f2fs_sb_info *sbi, out_unlock: spin_unlock(&free_i->segmap_lock);
- if (ret == -ENOSPC) + if (ret == -ENOSPC && !pinning) f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT); return ret; } @@ -2921,6 +2922,13 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type) return curseg->segno; }
+static void reset_curseg_fields(struct curseg_info *curseg) +{ + curseg->inited = false; + curseg->segno = NULL_SEGNO; + curseg->next_segno = 0; +} + /* * Allocate a current working segment. * This function always allocates a free segment in LFS manner. @@ -2939,7 +2947,7 @@ static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) ret = get_new_segment(sbi, &segno, new_sec, pinning); if (ret) { if (ret == -ENOSPC) - curseg->segno = NULL_SEGNO; + reset_curseg_fields(curseg); return ret; }
@@ -3710,13 +3718,6 @@ static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi, get_random_u32_inclusive(1, sbi->max_fragment_hole); }
-static void reset_curseg_fields(struct curseg_info *curseg) -{ - curseg->inited = false; - curseg->segno = NULL_SEGNO; - curseg->next_segno = 0; -} - int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 943be4f1d6d2..0465dc00b349 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -559,13 +559,16 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, unsigned int node_blocks, unsigned int data_blocks, unsigned int dent_blocks) { - unsigned int segno, left_blocks, blocks; int i;
/* check current data/node sections in the worst case. */ for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) { segno = CURSEG_I(sbi, i)->segno; + + if (unlikely(segno == NULL_SEGNO)) + return false; + left_blocks = CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true);
@@ -576,6 +579,10 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
/* check current data section for dentry blocks. */ segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno; + + if (unlikely(segno == NULL_SEGNO)) + return false; + left_blocks = CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true); if (dent_blocks > left_blocks) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 19b67828ae32..26b1021427ae 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1737,22 +1737,28 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
static int f2fs_freeze(struct super_block *sb) { + struct f2fs_sb_info *sbi = F2FS_SB(sb); + if (f2fs_readonly(sb)) return 0;
/* IO error happened before */ - if (unlikely(f2fs_cp_error(F2FS_SB(sb)))) + if (unlikely(f2fs_cp_error(sbi))) return -EIO;
/* must be clean, since sync_filesystem() was already called */ - if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY)) + if (is_sbi_flag_set(sbi, SBI_IS_DIRTY)) return -EINVAL;
+ sbi->umount_lock_holder = current; + /* Let's flush checkpoints and stop the thread. */ - f2fs_flush_ckpt_thread(F2FS_SB(sb)); + f2fs_flush_ckpt_thread(sbi); + + sbi->umount_lock_holder = NULL;
/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */ - set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING); + set_sbi_flag(sbi, SBI_IS_FREEZING); return 0; }
@@ -2329,6 +2335,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) org_mount_opt = sbi->mount_opt; old_sb_flags = sb->s_flags;
+ sbi->umount_lock_holder = current; + #ifdef CONFIG_QUOTA org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { @@ -2552,6 +2560,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
limit_reserve_root(sbi); *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); + + sbi->umount_lock_holder = NULL; return 0; restore_checkpoint: if (need_enable_checkpoint) { @@ -2592,6 +2602,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) #endif sbi->mount_opt = org_mount_opt; sb->s_flags = old_sb_flags; + + sbi->umount_lock_holder = NULL; return err; }
@@ -2908,7 +2920,7 @@ static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type) return ret; }
-int f2fs_quota_sync(struct super_block *sb, int type) +int f2fs_do_quota_sync(struct super_block *sb, int type) { struct f2fs_sb_info *sbi = F2FS_SB(sb); struct quota_info *dqopt = sb_dqopt(sb); @@ -2956,11 +2968,21 @@ int f2fs_quota_sync(struct super_block *sb, int type) return ret; }
+static int f2fs_quota_sync(struct super_block *sb, int type) +{ + int ret; + + F2FS_SB(sb)->umount_lock_holder = current; + ret = f2fs_do_quota_sync(sb, type); + F2FS_SB(sb)->umount_lock_holder = NULL; + return ret; +} + static int f2fs_quota_on(struct super_block *sb, int type, int format_id, const struct path *path) { struct inode *inode; - int err; + int err = 0;
/* if quota sysfile exists, deny enabling quota with specific file */ if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) { @@ -2971,31 +2993,34 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id, if (path->dentry->d_sb != sb) return -EXDEV;
- err = f2fs_quota_sync(sb, type); + F2FS_SB(sb)->umount_lock_holder = current; + + err = f2fs_do_quota_sync(sb, type); if (err) - return err; + goto out;
inode = d_inode(path->dentry);
err = filemap_fdatawrite(inode->i_mapping); if (err) - return err; + goto out;
err = filemap_fdatawait(inode->i_mapping); if (err) - return err; + goto out;
err = dquot_quota_on(sb, type, format_id, path); if (err) - return err; + goto out;
inode_lock(inode); F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL; f2fs_set_inode_flags(inode); inode_unlock(inode); f2fs_mark_inode_dirty_sync(inode, false); - - return 0; +out: + F2FS_SB(sb)->umount_lock_holder = NULL; + return err; }
static int __f2fs_quota_off(struct super_block *sb, int type) @@ -3006,7 +3031,7 @@ static int __f2fs_quota_off(struct super_block *sb, int type) if (!inode || !igrab(inode)) return dquot_quota_off(sb, type);
- err = f2fs_quota_sync(sb, type); + err = f2fs_do_quota_sync(sb, type); if (err) goto out_put;
@@ -3029,6 +3054,8 @@ static int f2fs_quota_off(struct super_block *sb, int type) struct f2fs_sb_info *sbi = F2FS_SB(sb); int err;
+ F2FS_SB(sb)->umount_lock_holder = current; + err = __f2fs_quota_off(sb, type);
/* @@ -3038,6 +3065,9 @@ static int f2fs_quota_off(struct super_block *sb, int type) */ if (is_journalled_quota(sbi)) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); + + F2FS_SB(sb)->umount_lock_holder = NULL; + return err; }
@@ -3170,7 +3200,7 @@ int f2fs_dquot_initialize(struct inode *inode) return 0; }
-int f2fs_quota_sync(struct super_block *sb, int type) +int f2fs_do_quota_sync(struct super_block *sb, int type) { return 0; } @@ -4703,6 +4733,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) if (err) goto free_compress_inode;
+ sbi->umount_lock_holder = current; #ifdef CONFIG_QUOTA /* Enable quota usage during mount */ if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { @@ -4769,10 +4800,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) } }
+reset_checkpoint: #ifdef CONFIG_QUOTA f2fs_recover_quota_end(sbi, quota_enabled); #endif -reset_checkpoint: /* * If the f2fs is not readonly and fsync data recovery succeeds, * write pointer consistency of cursegs and other zones are already @@ -4829,6 +4860,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) f2fs_update_time(sbi, CP_TIME); f2fs_update_time(sbi, REQ_TIME); clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); + + sbi->umount_lock_holder = NULL; return 0;
sync_free_meta: @@ -4931,6 +4964,8 @@ static void kill_f2fs_super(struct super_block *sb) struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (sb->s_root) { + sbi->umount_lock_holder = current; + set_sbi_flag(sbi, SBI_IS_CLOSE); f2fs_stop_gc_thread(sbi); f2fs_stop_discard_thread(sbi); diff --git a/fs/fsopen.c b/fs/fsopen.c index 094a7f510edf..1aaf4cb2afb2 100644 --- a/fs/fsopen.c +++ b/fs/fsopen.c @@ -453,7 +453,7 @@ SYSCALL_DEFINE5(fsconfig, case FSCONFIG_SET_FD: param.type = fs_value_is_file; ret = -EBADF; - param.file = fget(aux); + param.file = fget_raw(aux); if (!param.file) goto out_key; param.dirfd = aux; diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 0b6ee6dd1fd6..b7f805d2a14f 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -682,7 +682,6 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, 0, 0, fuse_wait_dax_page(inode)); }
-/* dmap_end == 0 leads to unmapping of whole file */ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 3805f9b06c9d..3b031d24d369 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1940,7 +1940,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry, if (FUSE_IS_DAX(inode) && is_truncate) { filemap_invalidate_lock(mapping); fault_blocked = true; - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) { filemap_invalidate_unlock(mapping); return err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index d63e56fd3dd2..754378dd9f71 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -253,7 +253,7 @@ static int fuse_open(struct inode *inode, struct file *file)
if (dax_truncate) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out_inode_unlock; } @@ -3205,7 +3205,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, inode_lock(inode); if (block_faults) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out; } diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 92a3b6ddafdc..0e6ad7bf32be 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1338,12 +1338,8 @@ static enum evict_behavior evict_should_delete(struct inode *inode,
/* Must not read inode block until block type has been verified */ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh); - if (unlikely(ret)) { - glock_clear_object(ip->i_iopen_gh.gh_gl, ip); - ip->i_iopen_gh.gh_flags |= GL_NOCACHE; - gfs2_glock_dq_uninit(&ip->i_iopen_gh); - return EVICT_SHOULD_DEFER_DELETE; - } + if (unlikely(ret)) + return EVICT_SHOULD_SKIP_DELETE;
if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino)) return EVICT_SHOULD_SKIP_DELETE; @@ -1363,15 +1359,8 @@ static enum evict_behavior evict_should_delete(struct inode *inode,
should_delete: if (gfs2_holder_initialized(&ip->i_iopen_gh) && - test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) { - enum evict_behavior behavior = - gfs2_upgrade_iopen_glock(inode); - - if (behavior != EVICT_SHOULD_DELETE) { - gfs2_holder_uninit(&ip->i_iopen_gh); - return behavior; - } - } + test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) + return gfs2_upgrade_iopen_glock(inode); return EVICT_SHOULD_DELETE; }
@@ -1509,7 +1498,7 @@ static void gfs2_evict_inode(struct inode *inode) gfs2_glock_put(io_gl); goto out; } - behavior = EVICT_SHOULD_DELETE; + behavior = EVICT_SHOULD_SKIP_DELETE; } if (behavior == EVICT_SHOULD_DELETE) ret = evict_unlinked_inode(inode); diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h index 8b39c15c408c..15b2f094d36e 100644 --- a/fs/hostfs/hostfs.h +++ b/fs/hostfs/hostfs.h @@ -60,7 +60,7 @@ struct hostfs_stat { unsigned int uid; unsigned int gid; unsigned long long size; - struct hostfs_timespec atime, mtime, ctime; + struct hostfs_timespec atime, mtime, ctime, btime; unsigned int blksize; unsigned long long blocks; struct { diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index e0741e468956..e6e247235728 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -33,6 +33,7 @@ struct hostfs_inode_info { struct inode vfs_inode; struct mutex open_mutex; dev_t dev; + struct hostfs_timespec btime; };
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode) @@ -547,6 +548,7 @@ static int hostfs_inode_set(struct inode *ino, void *data) }
HOSTFS_I(ino)->dev = dev; + HOSTFS_I(ino)->btime = st->btime; ino->i_ino = st->ino; ino->i_mode = st->mode; return hostfs_inode_update(ino, st); @@ -557,7 +559,10 @@ static int hostfs_inode_test(struct inode *inode, void *data) const struct hostfs_stat *st = data; dev_t dev = MKDEV(st->dev.maj, st->dev.min);
- return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev; + return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev && + (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) && + HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec && + HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec; }
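hostfs now keys cached inodes on more than the host inode number: the file type and birth time are compared as well, so a recycled inode number on the host is not mistaken for the same file. A small userspace sketch of such an identity-tuple comparison, illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <time.h>

/* Hypothetical cached identity for a host file. */
struct cached_id {
	dev_t dev;
	ino_t ino;
	mode_t type;			/* S_IFMT bits only */
	struct timespec btime;		/* birth time, if the host reports one */
};

/* The inode number alone can be recycled by the host; compare the full tuple. */
static bool same_host_file(const struct cached_id *a, const struct cached_id *b)
{
	return a->dev == b->dev && a->ino == b->ino &&
	       a->type == b->type &&
	       a->btime.tv_sec == b->btime.tv_sec &&
	       a->btime.tv_nsec == b->btime.tv_nsec;
}

int main(void)
{
	struct cached_id old_id = { 8, 42, S_IFREG, { 100, 0 } };
	struct cached_id new_id = { 8, 42, S_IFREG, { 200, 0 } };	/* recreated file */

	printf("%s\n", same_host_file(&old_id, &new_id) ?
	       "same file" : "inode number was reused");
	return 0;
}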
static struct inode *hostfs_iget(struct super_block *sb, char *name) diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c index 97e9c40a9448..3bcd9f35e70b 100644 --- a/fs/hostfs/hostfs_user.c +++ b/fs/hostfs/hostfs_user.c @@ -18,39 +18,48 @@ #include "hostfs.h" #include <utime.h>
-static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p) +static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p) { - p->ino = buf->st_ino; - p->mode = buf->st_mode; - p->nlink = buf->st_nlink; - p->uid = buf->st_uid; - p->gid = buf->st_gid; - p->size = buf->st_size; - p->atime.tv_sec = buf->st_atime; - p->atime.tv_nsec = 0; - p->ctime.tv_sec = buf->st_ctime; - p->ctime.tv_nsec = 0; - p->mtime.tv_sec = buf->st_mtime; - p->mtime.tv_nsec = 0; - p->blksize = buf->st_blksize; - p->blocks = buf->st_blocks; - p->rdev.maj = os_major(buf->st_rdev); - p->rdev.min = os_minor(buf->st_rdev); - p->dev.maj = os_major(buf->st_dev); - p->dev.min = os_minor(buf->st_dev); + p->ino = buf->stx_ino; + p->mode = buf->stx_mode; + p->nlink = buf->stx_nlink; + p->uid = buf->stx_uid; + p->gid = buf->stx_gid; + p->size = buf->stx_size; + p->atime.tv_sec = buf->stx_atime.tv_sec; + p->atime.tv_nsec = buf->stx_atime.tv_nsec; + p->ctime.tv_sec = buf->stx_ctime.tv_sec; + p->ctime.tv_nsec = buf->stx_ctime.tv_nsec; + p->mtime.tv_sec = buf->stx_mtime.tv_sec; + p->mtime.tv_nsec = buf->stx_mtime.tv_nsec; + if (buf->stx_mask & STATX_BTIME) { + p->btime.tv_sec = buf->stx_btime.tv_sec; + p->btime.tv_nsec = buf->stx_btime.tv_nsec; + } else { + memset(&p->btime, 0, sizeof(p->btime)); + } + p->blksize = buf->stx_blksize; + p->blocks = buf->stx_blocks; + p->rdev.maj = buf->stx_rdev_major; + p->rdev.min = buf->stx_rdev_minor; + p->dev.maj = buf->stx_dev_major; + p->dev.min = buf->stx_dev_minor; }
int stat_file(const char *path, struct hostfs_stat *p, int fd) { - struct stat64 buf; + struct statx buf; + int flags = AT_SYMLINK_NOFOLLOW;
if (fd >= 0) { - if (fstat64(fd, &buf) < 0) - return -errno; - } else if (lstat64(path, &buf) < 0) { - return -errno; + flags |= AT_EMPTY_PATH; + path = ""; } - stat64_to_hostfs(&buf, p); + + if ((statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf)) < 0) + return -errno; + + statx_to_hostfs(&buf, p); return 0; }
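stat_file() switches from lstat64/fstat64 to statx(), using AT_EMPTY_PATH for an already-open fd and requesting STATX_BTIME so the birth time can be filled in when the host filesystem reports it. A minimal userspace example of the same statx() call pattern (the path in main() is just a placeholder):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

/* Query a path, or an already-open fd via AT_EMPTY_PATH, for its birth time. */
static int print_btime(int fd, const char *path)
{
	struct statx stx;
	int flags = AT_SYMLINK_NOFOLLOW;

	if (fd >= 0) {
		flags |= AT_EMPTY_PATH;
		path = "";
	}
	if (statx(fd >= 0 ? fd : AT_FDCWD, path, flags,
		  STATX_BASIC_STATS | STATX_BTIME, &stx) < 0) {
		perror("statx");
		return -1;
	}
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
	else
		puts("no birth time reported by this filesystem");
	return 0;
}

int main(void)
{
	return print_btime(-1, "/etc/hostname") == 0 ? 0 : 1;
}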
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index eb2f8273e6f1..09df40b612fb 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -147,7 +147,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, de = tmpde; } /* Basic sanity check, whether name doesn't exceed dir entry */ - if (de_len < de->name_len[0] + + if (de_len < sizeof(struct iso_directory_record) || + de_len < de->name_len[0] + sizeof(struct iso_directory_record)) { printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index d8084b31b361..a10e086a0165 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -603,7 +603,7 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid) int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid) { int ret = 0; - transaction_t *commit_trans; + transaction_t *commit_trans, *running_trans;
if (!(journal->j_flags & JBD2_BARRIER)) return 0; @@ -613,6 +613,16 @@ int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid) goto out; commit_trans = journal->j_committing_transaction; if (!commit_trans || commit_trans->t_tid != tid) { + running_trans = journal->j_running_transaction; + /* + * The query transaction hasn't started committing, + * it must still be running. + */ + if (WARN_ON_ONCE(!running_trans || + running_trans->t_tid != tid)) + goto out; + + running_trans->t_need_data_flush = 1; ret = 1; goto out; } @@ -1965,17 +1975,15 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags) return err; }
- if (block_start == ~0ULL) { - block_start = phys_block; - block_stop = block_start - 1; - } + if (block_start == ~0ULL) + block_stop = block_start = phys_block;
/* * last block not contiguous with current block, * process last contiguous region and return to this block on * next loop */ - if (phys_block != block_stop + 1) { + if (phys_block != block_stop) { block--; } else { block_stop++; @@ -1994,11 +2002,10 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags) */ byte_start = block_start * journal->j_blocksize; byte_stop = block_stop * journal->j_blocksize; - byte_count = (block_stop - block_start + 1) * - journal->j_blocksize; + byte_count = (block_stop - block_start) * journal->j_blocksize;
truncate_inode_pages_range(journal->j_dev->bd_mapping, - byte_start, byte_stop); + byte_start, byte_stop - 1);
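The journal erase fix treats each contiguous run as a half-open block range [block_start, block_stop), which removes the off-by-one in the byte count and passes an inclusive end to truncate_inode_pages_range(). A standalone sketch of accumulating and flushing such runs, with hypothetical data and simplified control flow:

#include <stdint.h>
#include <stdio.h>

/* Process one contiguous run of blocks as a half-open range [start, stop). */
static void process_range(uint64_t start, uint64_t stop, uint64_t blocksize)
{
	uint64_t byte_start = start * blocksize;
	uint64_t byte_count = (stop - start) * blocksize;

	printf("erase %llu bytes at offset %llu (blocks [%llu, %llu))\n",
	       (unsigned long long)byte_count, (unsigned long long)byte_start,
	       (unsigned long long)start, (unsigned long long)stop);
}

int main(void)
{
	/* Hypothetical physical block numbers for consecutive logical blocks. */
	uint64_t phys[] = { 100, 101, 102, 200, 201, 300 };
	uint64_t start = UINT64_MAX, stop = 0, blocksize = 4096;

	for (size_t i = 0; i < sizeof(phys) / sizeof(phys[0]); i++) {
		if (start == UINT64_MAX)
			stop = start = phys[i];

		if (phys[i] != stop) {
			/* Not contiguous: flush the previous run, restart here. */
			process_range(start, stop, blocksize);
			stop = start = phys[i];
		}
		stop++;
	}
	if (start != UINT64_MAX)
		process_range(start, stop, blocksize);
	return 0;
}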
if (flags & JBD2_JOURNAL_FLUSH_DISCARD) { err = blkdev_issue_discard(journal->j_dev, @@ -2013,7 +2020,7 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags) }
if (unlikely(err != 0)) { - pr_err("JBD2: (error %d) unable to wipe journal at physical blocks %llu - %llu", + pr_err("JBD2: (error %d) unable to wipe journal at physical blocks [%llu, %llu)", err, block_start, block_stop); return err; } diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 07cfdc440596..60fc92dee24d 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -369,7 +369,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
ASSERT(length >= 0);
- if (test_cflag(COMMIT_Nolink, ip)) { + if (test_cflag(COMMIT_Nolink, ip) || isReadOnly(ip)) { xtTruncate(0, ip, length, COMMIT_WMAP); return; } diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 8f85177f284b..93db6eec4465 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -117,7 +117,8 @@ do { \ if (!(RC)) { \ if (((P)->header.nextindex > \ (((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \ - ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT))) { \ + ((BN) && (((P)->header.maxslot > DTPAGEMAXSLOT) || \ + ((P)->header.stblindex >= DTPAGEMAXSLOT)))) { \ BT_PUTPAGE(MP); \ jfs_error((IP)->i_sb, \ "DT_GETPAGE: dtree page corrupt\n"); \ diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c index 63d21822d309..46529bcc8297 100644 --- a/fs/jfs/jfs_extent.c +++ b/fs/jfs/jfs_extent.c @@ -74,6 +74,11 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr) int rc; int xflag;
+ if (isReadOnly(ip)) { + jfs_error(ip->i_sb, "read-only filesystem\n"); + return -EIO; + } + /* This blocks if we are low on resources */ txBeginAnon(ip->i_sb);
@@ -253,6 +258,11 @@ int extRecord(struct inode *ip, xad_t * xp) { int rc;
+ if (isReadOnly(ip)) { + jfs_error(ip->i_sb, "read-only filesystem\n"); + return -EIO; + } + txBeginAnon(ip->i_sb);
mutex_lock(&JFS_IP(ip)->commit_mutex); diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index a360b24ed320..debfc1389cb3 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -3029,14 +3029,23 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno, * * RETURN VALUES: * 0 - success - * -ENOMEM - insufficient memory + * -EINVAL - unexpected inode type */ static int copy_from_dinode(struct dinode * dip, struct inode *ip) { struct jfs_inode_info *jfs_ip = JFS_IP(ip); struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); + int fileset = le32_to_cpu(dip->di_fileset); + + switch (fileset) { + case AGGR_RESERVED_I: case AGGREGATE_I: case BMAP_I: + case LOG_I: case BADBLOCK_I: case FILESYSTEM_I: + break; + default: + return -EINVAL; + }
- jfs_ip->fileset = le32_to_cpu(dip->di_fileset); + jfs_ip->fileset = fileset; jfs_ip->mode2 = le32_to_cpu(dip->di_mode); jfs_set_inode_flags(ip);
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 24afbae87225..11d7f74d207b 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -559,11 +559,16 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
size_check: if (EALIST_SIZE(ea_buf->xattr) != ea_size) { - int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr)); - - printk(KERN_ERR "ea_get: invalid extended attribute\n"); - print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, - ea_buf->xattr, size, 1); + if (unlikely(EALIST_SIZE(ea_buf->xattr) > INT_MAX)) { + printk(KERN_ERR "ea_get: extended attribute size too large: %u > INT_MAX\n", + EALIST_SIZE(ea_buf->xattr)); + } else { + int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr)); + + printk(KERN_ERR "ea_get: invalid extended attribute\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, + ea_buf->xattr, size, 1); + } ea_release(inode, ea_buf); rc = -EIO; goto clean_up; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 4db912f56230..325ba0663a6d 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -79,6 +79,7 @@ static void nfs_mark_return_delegation(struct nfs_server *server, struct nfs_delegation *delegation) { set_bit(NFS_DELEGATION_RETURN, &delegation->flags); + set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags); set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); }
@@ -330,14 +331,16 @@ nfs_start_delegation_return(struct nfs_inode *nfsi) }
static void nfs_abort_delegation_return(struct nfs_delegation *delegation, - struct nfs_client *clp, int err) + struct nfs_server *server, int err) { - spin_lock(&delegation->lock); clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags); if (err == -EAGAIN) { set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); - set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state); + set_bit(NFS4SERV_DELEGRETURN_DELAYED, + &server->delegation_flags); + set_bit(NFS4CLNT_DELEGRETURN_DELAYED, + &server->nfs_client->cl_state); } spin_unlock(&delegation->lock); } @@ -547,7 +550,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred, */ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync) { - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + struct nfs_server *server = NFS_SERVER(inode); unsigned int mode = O_WRONLY | O_RDWR; int err = 0;
@@ -569,11 +572,11 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation /* * Guard against state recovery */ - err = nfs4_wait_clnt_recover(clp); + err = nfs4_wait_clnt_recover(server->nfs_client); }
if (err) { - nfs_abort_delegation_return(delegation, clp, err); + nfs_abort_delegation_return(delegation, server, err); goto out; }
@@ -590,17 +593,6 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) ret = true; - else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) { - struct inode *inode; - - spin_lock(&delegation->lock); - inode = delegation->inode; - if (inode && list_empty(&NFS_I(inode)->open_files)) - ret = true; - spin_unlock(&delegation->lock); - } - if (ret) - clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) || test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) || test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) @@ -619,6 +611,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server, struct nfs_delegation *place_holder_deleg = NULL; int err = 0;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN, + &server->delegation_flags)) + return 0; restart: /* * To avoid quadratic looping we hold a reference @@ -670,6 +665,7 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server, cond_resched(); if (!err) goto restart; + set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags); set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); goto out; } @@ -684,6 +680,9 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server) struct nfs_delegation *d; bool ret = false;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED, + &server->delegation_flags)) + goto out; list_for_each_entry_rcu (d, &server->delegations, super_list) { if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags)) continue; @@ -691,6 +690,7 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server) clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags); ret = true; } +out: return ret; }
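The delegation changes add per-server flag bits (NFS4SERV_*) alongside the per-client NFS4CLNT_* state so each scan can bail out early when nothing on that server was marked. A tiny sketch of that test-and-clear gating idea; the real code uses atomic bitops, while this sketch is single-threaded and uses invented names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-server flag word gating the expensive delegation scan. */
enum { SERV_DELEGRETURN = 1u << 0 };

static unsigned int delegation_flags;

static bool test_and_clear_flag(unsigned int *word, unsigned int bit)
{
	bool was_set = *word & bit;

	*word &= ~bit;
	return was_set;
}

static int return_marked_delegations(void)
{
	/* Nothing was marked on this server: skip the whole list walk. */
	if (!test_and_clear_flag(&delegation_flags, SERV_DELEGRETURN))
		return 0;

	puts("walking this server's delegation list");
	return 0;
}

int main(void)
{
	return_marked_delegations();			/* skipped */
	delegation_flags |= SERV_DELEGRETURN;		/* something got marked */
	return_marked_delegations();			/* now the scan runs */
	return 0;
}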
@@ -878,11 +878,25 @@ int nfs4_inode_make_writeable(struct inode *inode) return nfs4_inode_return_delegation(inode); }
-static void nfs_mark_return_if_closed_delegation(struct nfs_server *server, - struct nfs_delegation *delegation) +static void +nfs_mark_return_if_closed_delegation(struct nfs_server *server, + struct nfs_delegation *delegation) { - set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); - set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); + struct inode *inode; + + if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) || + test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) + return; + spin_lock(&delegation->lock); + inode = delegation->inode; + if (!inode) + goto out; + if (list_empty(&NFS_I(inode)->open_files)) + nfs_mark_return_delegation(server, delegation); + else + set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); +out: + spin_unlock(&delegation->lock); }
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server) @@ -1276,6 +1290,7 @@ static void nfs_mark_test_expired_delegation(struct nfs_server *server, return; clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); + set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags); set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state); }
@@ -1354,6 +1369,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server, nfs4_stateid stateid; unsigned long gen = ++server->delegation_gen;
+ if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED, + &server->delegation_flags)) + return 0; restart: rcu_read_lock(); list_for_each_entry_rcu(delegation, &server->delegations, super_list) { @@ -1383,6 +1401,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server, goto restart; } nfs_inode_mark_test_expired_delegation(server,inode); + set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags); + set_bit(NFS4CLNT_DELEGATION_EXPIRED, + &server->nfs_client->cl_state); iput(inode); return -EAGAIN; } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e8ac3f615f93..71f45cc0ca74 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -82,9 +82,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) */ #define pagepad_maxsz (1) -#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2) -#define lock_owner_id_maxsz (1 + 1 + 4) -#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) +#define open_owner_id_maxsz (2 + 1 + 2 + 2) +#define lock_owner_id_maxsz (2 + 1 + 2) #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) #define op_encode_hdr_maxsz (1) @@ -185,7 +184,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) #define encode_open_maxsz (op_encode_hdr_maxsz + \ 2 + encode_share_access_maxsz + 2 + \ - open_owner_id_maxsz + \ + 1 + open_owner_id_maxsz + \ encode_opentype_maxsz + \ encode_claim_null_maxsz) #define decode_space_limit_maxsz (3) @@ -255,13 +254,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #define encode_link_maxsz (op_encode_hdr_maxsz + \ nfs4_name_maxsz) #define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) -#define encode_lockowner_maxsz (7) +#define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz) + #define encode_lock_maxsz (op_encode_hdr_maxsz + \ 7 + \ 1 + encode_stateid_maxsz + 1 + \ encode_lockowner_maxsz) #define decode_lock_denied_maxsz \ - (8 + decode_lockowner_maxsz) + (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz) #define decode_lock_maxsz (op_decode_hdr_maxsz + \ decode_lock_denied_maxsz) #define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \ @@ -617,7 +617,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, encode_lockowner_maxsz) #define NFS4_dec_release_lockowner_sz \ (compound_decode_hdr_maxsz + \ - decode_lockowner_maxsz) + decode_release_lockowner_maxsz) #define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ @@ -1412,7 +1412,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena __be32 *p; /* * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, - * owner 4 = 32 + * owner 28 */ encode_nfs4_seqid(xdr, arg->seqid); encode_share_access(xdr, arg->share_access); @@ -5077,7 +5077,7 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) /* * We create the owner, so we know a proper owner.id length is 4. 
*/ -static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl) +static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl) { uint64_t offset, length, clientid; __be32 *p; diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c index 7b59a40d40c0..784f7c1d003b 100644 --- a/fs/nfs/sysfs.c +++ b/fs/nfs/sysfs.c @@ -14,6 +14,7 @@ #include <linux/rcupdate.h> #include <linux/lockd/lockd.h>
+#include "internal.h" #include "nfs4_fs.h" #include "netns.h" #include "sysfs.h" @@ -228,6 +229,25 @@ static void shutdown_client(struct rpc_clnt *clnt) rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL); }
+/* + * Shut down the nfs_client only once all the superblocks + * have been shut down. + */ +static void shutdown_nfs_client(struct nfs_client *clp) +{ + struct nfs_server *server; + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + if (!(server->flags & NFS_MOUNT_SHUTDOWN)) { + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + nfs_mark_client_ready(clp, -EIO); + shutdown_client(clp->cl_rpcclient); +} + static ssize_t shutdown_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -259,7 +279,6 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
server->flags |= NFS_MOUNT_SHUTDOWN; shutdown_client(server->client); - shutdown_client(server->nfs_client->cl_rpcclient);
if (!IS_ERR(server->client_acl)) shutdown_client(server->client_acl); @@ -267,6 +286,7 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr, if (server->nlm_host) shutdown_client(server->nlm_host->h_rpcclnt); out: + shutdown_nfs_client(server->nfs_client); return count; }
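shutdown_store() no longer tears down the shared nfs_client immediately; shutdown_nfs_client() only does so once every superblock using that client has been marked NFS_MOUNT_SHUTDOWN. A minimal sketch of that all-users-marked check, with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

#define NFS_MOUNT_SHUTDOWN 0x1

/* Hypothetical stand-in for one mount sharing the client. */
struct server { unsigned int flags; };

/* Shut the shared transport down only once every mount is marked. */
static void shutdown_client_if_idle(struct server *servers, int n)
{
	for (int i = 0; i < n; i++)
		if (!(servers[i].flags & NFS_MOUNT_SHUTDOWN))
			return;	/* another mount still needs it */
	puts("shutting down shared nfs_client");
}

int main(void)
{
	struct server mounts[2] = { { NFS_MOUNT_SHUTDOWN }, { 0 } };

	shutdown_client_if_idle(mounts, 2);	/* second mount still live */
	mounts[1].flags |= NFS_MOUNT_SHUTDOWN;
	shutdown_client_if_idle(mounts, 2);	/* now it can be torn down */
	return 0;
}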
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index aa3d8bea3ec0..23df8b214474 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -579,8 +579,10 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
while (!nfs_lock_request(head)) { ret = nfs_wait_on_request(head); - if (ret < 0) + if (ret < 0) { + nfs_release_request(head); return ERR_PTR(ret); + } }
/* Ensure that nobody removed the request before we locked it */ diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index c0bd1509ccd4..792d3fed1b45 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -172,6 +172,16 @@ config NFSD_LEGACY_CLIENT_TRACKING recoverydir, or spawn a process directly using a usermodehelper upcall.
- These legacy client tracking methods have proven to be probelmatic + These legacy client tracking methods have proven to be problematic and will be removed in the future. Say Y here if you need support for them in the interim. + +config NFSD_V4_DELEG_TIMESTAMPS + bool "Support delegated timestamps" + depends on NFSD_V4 + default n + help + NFSD implements delegated timestamps according to + draft-ietf-nfsv4-delstid-08 "Extending the Opening of Files". This + is currently an experimental feature and is therefore left disabled + by default. diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 484077200c5d..d649a3d65a3a 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -101,15 +101,15 @@ static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
if (bitmap[0] & FATTR4_WORD0_CHANGE) if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0) - return -NFSERR_BAD_XDR; + return -EIO; if (bitmap[0] & FATTR4_WORD0_SIZE) if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0) - return -NFSERR_BAD_XDR; + return -EIO; if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) { fattr4_time_deleg_access access;
if (!xdrgen_decode_fattr4_time_deleg_access(xdr, &access)) - return -NFSERR_BAD_XDR; + return -EIO; fattr->ncf_cb_atime.tv_sec = access.seconds; fattr->ncf_cb_atime.tv_nsec = access.nseconds;
@@ -118,7 +118,7 @@ static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap, fattr4_time_deleg_modify modify;
if (!xdrgen_decode_fattr4_time_deleg_modify(xdr, &modify)) - return -NFSERR_BAD_XDR; + return -EIO; fattr->ncf_cb_mtime.tv_sec = modify.seconds; fattr->ncf_cb_mtime.tv_nsec = modify.nseconds;
@@ -682,15 +682,15 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp, if (unlikely(status || cb->cb_status)) return status; if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0) - return -NFSERR_BAD_XDR; + return -EIO; if (xdr_stream_decode_u32(xdr, &attrlen) < 0) - return -NFSERR_BAD_XDR; + return -EIO; maxlen = sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize); if (bitmap[2] != 0) maxlen += (sizeof(ncf->ncf_cb_mtime.tv_sec) + sizeof(ncf->ncf_cb_mtime.tv_nsec)) * 2; if (attrlen > maxlen) - return -NFSERR_BAD_XDR; + return -EIO; status = decode_cb_fattr4(xdr, bitmap, ncf); return status; } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 153eeea2c7c9..2de49e2d6ac4 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1050,6 +1050,12 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) return openlockstateid(stid); }
+/* + * As the sc_free callback of deleg, this may be called by nfs4_put_stid + * in nfsd_break_one_deleg. + * Considering nfsd_break_one_deleg is called with the flc->flc_lock held, + * this function mustn't ever sleep. + */ static void nfs4_free_deleg(struct nfs4_stid *stid) { struct nfs4_delegation *dp = delegstateid(stid); @@ -5414,6 +5420,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
static void nfsd_break_one_deleg(struct nfs4_delegation *dp) { + bool queued; /* * We're assuming the state code never drops its reference * without first removing the lease. Since we're in this lease @@ -5422,7 +5429,10 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) * we know it's safe to take a reference. */ refcount_inc(&dp->dl_stid.sc_count); - WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall)); + queued = nfsd4_run_cb(&dp->dl_recall); + WARN_ON_ONCE(!queued); + if (!queued) + nfs4_put_stid(&dp->dl_stid); }
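nfsd_break_one_deleg() now drops the reference it took for the callback when nfsd4_run_cb() fails to queue it, instead of leaking it; because the caller holds flc_lock, the final put must not sleep, which is what the comment added to nfs4_free_deleg() records. A standalone sketch of the take-a-reference-then-undo-on-failure pattern, with a toy refcounted object:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical refcounted object standing in for the delegation stateid. */
struct obj { int refcount; };

static void obj_get(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		puts("object freed");
}

/* Pretend the callback could not be queued. */
static bool queue_work_stub(struct obj *o)
{
	(void)o;
	return false;
}

int main(void)
{
	struct obj deleg = { .refcount = 1 };

	/* Take a reference on behalf of the queued callback... */
	obj_get(&deleg);
	/* ...and drop it again if the callback was never queued. */
	if (!queue_work_stub(&deleg))
		obj_put(&deleg);

	obj_put(&deleg);	/* caller's original reference */
	return 0;
}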
/* Called from break_lease() with flc_lock held. */ @@ -5948,11 +5958,23 @@ nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf) return 0; }
+#ifdef CONFIG_NFSD_V4_DELEG_TIMESTAMPS +static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open) +{ + return open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS; +} +#else /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */ +static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open) +{ + return false; +} +#endif /* CONFIG NFSD_V4_DELEG_TIMESTAMPS */ + static struct nfs4_delegation * nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, struct svc_fh *parent) { - bool deleg_ts = open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS; + bool deleg_ts = nfsd4_want_deleg_timestamps(open); struct nfs4_client *clp = stp->st_stid.sc_client; struct nfs4_file *fp = stp->st_stid.sc_file; struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; @@ -6151,8 +6173,8 @@ static void nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, struct svc_fh *currentfh) { - bool deleg_ts = open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS; struct nfs4_openowner *oo = openowner(stp->st_stateowner); + bool deleg_ts = nfsd4_want_deleg_timestamps(open); struct nfs4_client *clp = stp->st_stid.sc_client; struct svc_fh *parent = NULL; struct nfs4_delegation *dp; @@ -6860,14 +6882,19 @@ deleg_reaper(struct nfsd_net *nn) spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); - if (clp->cl_state != NFSD4_ACTIVE || - list_empty(&clp->cl_delegations) || - atomic_read(&clp->cl_delegs_in_recall) || - test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) || - (ktime_get_boottime_seconds() - - clp->cl_ra_time < 5)) { + + if (clp->cl_state != NFSD4_ACTIVE) + continue; + if (list_empty(&clp->cl_delegations)) + continue; + if (atomic_read(&clp->cl_delegs_in_recall)) + continue; + if (test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags)) + continue; + if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5) + continue; + if (clp->cl_cb_state != NFSD4_CB_UP) continue; - } list_add(&clp->cl_ra_cblist, &cblist);
/* release in nfsd4_cb_recall_any_release */ @@ -7051,7 +7078,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, */ statusmask |= SC_STATUS_REVOKED;
- statusmask |= SC_STATUS_ADMIN_REVOKED; + statusmask |= SC_STATUS_ADMIN_REVOKED | SC_STATUS_FREEABLE;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || CLOSE_STATEID(stateid)) @@ -7706,9 +7733,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) return status;
- status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, - SC_STATUS_REVOKED | SC_STATUS_FREEABLE, - &s, nn); + status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, SC_STATUS_REVOKED, &s, nn); if (status) goto out; dp = delegstateid(s); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index ce2a71e4904c..ac265d6fde35 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1917,6 +1917,7 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info) struct svc_serv *serv; LIST_HEAD(permsocks); struct nfsd_net *nn; + bool delete = false; int err, rem;
mutex_lock(&nfsd_mutex); @@ -1977,34 +1978,28 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info) } }
- /* For now, no removing old sockets while server is running */ - if (serv->sv_nrthreads && !list_empty(&permsocks)) { + /* + * If there are listener transports remaining on the permsocks list, + * it means we were asked to remove a listener. + */ + if (!list_empty(&permsocks)) { list_splice_init(&permsocks, &serv->sv_permsocks); - spin_unlock_bh(&serv->sv_lock); - err = -EBUSY; - goto out_unlock_mtx; + delete = true; } + spin_unlock_bh(&serv->sv_lock);
- /* Close the remaining sockets on the permsocks list */ - while (!list_empty(&permsocks)) { - xprt = list_first_entry(&permsocks, struct svc_xprt, xpt_list); - list_move(&xprt->xpt_list, &serv->sv_permsocks); - - /* - * Newly-created sockets are born with the BUSY bit set. Clear - * it if there are no threads, since nothing can pick it up - * in that case. - */ - if (!serv->sv_nrthreads) - clear_bit(XPT_BUSY, &xprt->xpt_flags); - - set_bit(XPT_CLOSE, &xprt->xpt_flags); - spin_unlock_bh(&serv->sv_lock); - svc_xprt_close(xprt); - spin_lock_bh(&serv->sv_lock); + /* Do not remove listeners while there are active threads. */ + if (serv->sv_nrthreads) { + err = -EBUSY; + goto out_unlock_mtx; }
- spin_unlock_bh(&serv->sv_lock); + /* + * Since we can't delete an arbitrary llist entry, destroy the + * remaining listeners and recreate the list. + */ + if (delete) + svc_xprt_destroy_all(serv, net);
/* walk list of addrs again, open any that still don't exist */ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) { @@ -2031,6 +2026,9 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
xprt = svc_find_listener(serv, xcl_name, net, sa); if (xprt) { + if (delete) + WARN_ONCE(1, "Transport type=%s already exists\n", + xcl_name); svc_xprt_put(xprt); continue; } @@ -2204,8 +2202,14 @@ static __net_init int nfsd_net_init(struct net *net) NFSD_STATS_COUNTERS_NUM); if (retval) goto out_repcache_error; + memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats)); nn->nfsd_svcstats.program = &nfsd_programs[0]; + if (!nfsd_proc_stat_init(net)) { + retval = -ENOMEM; + goto out_proc_error; + } + for (i = 0; i < sizeof(nn->nfsd_versions); i++) nn->nfsd_versions[i] = nfsd_support_version(i); for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++) @@ -2215,13 +2219,14 @@ static __net_init int nfsd_net_init(struct net *net) nfsd4_init_leases_net(nn); get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key)); seqlock_init(&nn->writeverf_lock); - nfsd_proc_stat_init(net); #if IS_ENABLED(CONFIG_NFS_LOCALIO) spin_lock_init(&nn->local_clients_lock); INIT_LIST_HEAD(&nn->local_clients); #endif return 0;
+out_proc_error: + percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM); out_repcache_error: nfsd_idmap_shutdown(net); out_idmap_error: diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c index bb22893f1157..f7eaf95e20fc 100644 --- a/fs/nfsd/stats.c +++ b/fs/nfsd/stats.c @@ -73,11 +73,11 @@ static int nfsd_show(struct seq_file *seq, void *v)
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
-void nfsd_proc_stat_init(struct net *net) +struct proc_dir_entry *nfsd_proc_stat_init(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops); + return svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops); }
void nfsd_proc_stat_shutdown(struct net *net) diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h index 04aacb6c36e2..e4efb0e4e56d 100644 --- a/fs/nfsd/stats.h +++ b/fs/nfsd/stats.h @@ -10,7 +10,7 @@ #include <uapi/linux/nfsd/stats.h> #include <linux/percpu_counter.h>
-void nfsd_proc_stat_init(struct net *net); +struct proc_dir_entry *nfsd_proc_stat_init(struct net *net); void nfsd_proc_stat_shutdown(struct net *net);
static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn) diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 29cb7b812d71..6cd130b5c2b6 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1931,9 +1931,17 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, return err; }
-/* - * Unlink a file or directory - * N.B. After this call fhp needs an fh_put +/** + * nfsd_unlink - remove a directory entry + * @rqstp: RPC transaction context + * @fhp: the file handle of the parent directory to be modified + * @type: enforced file type of the object to be removed + * @fname: the name of directory entry to be removed + * @flen: length of @fname in octets + * + * After this call fhp needs an fh_put. + * + * Returns a generic NFS status code in network byte-order. */ __be32 nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, @@ -2007,15 +2015,17 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, fh_drop_write(fhp); out_nfserr: if (host_err == -EBUSY) { - /* name is mounted-on. There is no perfect - * error status. + /* + * See RFC 8881 Section 18.25.4 para 4: NFSv4 REMOVE + * wants a status unique to the object type. */ - err = nfserr_file_open; - } else { - err = nfserrno(host_err); + if (type != S_IFDIR) + err = nfserr_file_open; + else + err = nfserr_acces; } out: - return err; + return err != nfs_ok ? err : nfserrno(host_err); out_unlock: inode_unlock(dirp); goto out_drop_write; diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c index af94e3737470..e946f75eb540 100644 --- a/fs/ntfs3/attrib.c +++ b/fs/ntfs3/attrib.c @@ -2664,8 +2664,9 @@ int attr_set_compress(struct ntfs_inode *ni, bool compr) attr->nres.run_off = cpu_to_le16(run_off); }
- /* Update data attribute flags. */ + /* Update attribute flags. */ if (compr) { + attr->flags &= ~ATTR_FLAG_SPARSED; attr->flags |= ATTR_FLAG_COMPRESSED; attr->nres.c_unit = NTFS_LZNT_CUNIT; } else { diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 3f96a11804c9..e9f701f884e7 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -101,8 +101,26 @@ int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, /* Allowed to change compression for empty files and for directories only. */ if (!is_dedup(ni) && !is_encrypted(ni) && (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { - /* Change compress state. */ - int err = ni_set_compress(inode, flags & FS_COMPR_FL); + int err = 0; + struct address_space *mapping = inode->i_mapping; + + /* write out all data and wait. */ + filemap_invalidate_lock(mapping); + err = filemap_write_and_wait(mapping); + + if (err >= 0) { + /* Change compress state. */ + bool compr = flags & FS_COMPR_FL; + err = ni_set_compress(inode, compr); + + /* For files change a_ops too. */ + if (!err) + mapping->a_ops = compr ? &ntfs_aops_cmpr : + &ntfs_aops; + } + + filemap_invalidate_unlock(mapping); + if (err) return err; } diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 5df6a0b5add9..81271196c557 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -3434,10 +3434,12 @@ int ni_set_compress(struct inode *inode, bool compr) }
ni->std_fa = std->fa; - if (compr) + if (compr) { + std->fa &= ~FILE_ATTRIBUTE_SPARSE_FILE; std->fa |= FILE_ATTRIBUTE_COMPRESSED; - else + } else { std->fa &= ~FILE_ATTRIBUTE_COMPRESSED; + }
if (ni->std_fa != std->fa) { ni->std_fa = std->fa; diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 7eb9fae22f8d..78d20e4baa2c 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -618,7 +618,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes) u32 off = le32_to_cpu(hdr->de_off);
if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot || - off + sizeof(struct NTFS_DE) > end) { + size_add(off, sizeof(struct NTFS_DE)) > end) { /* incorrect index buffer. */ return false; } @@ -736,7 +736,7 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx, if (end > total) return NULL;
- if (off + sizeof(struct NTFS_DE) > end) + if (size_add(off, sizeof(struct NTFS_DE)) > end) return NULL;
e = Add2Ptr(hdr, off); diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h index 241f2ffdd920..1ff13b6f9613 100644 --- a/fs/ntfs3/ntfs.h +++ b/fs/ntfs3/ntfs.h @@ -717,7 +717,7 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr) struct NTFS_DE *e; u16 esize;
- if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used ) + if (de_off >= used || size_add(de_off, sizeof(struct NTFS_DE)) > used) return NULL;
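The ntfs3 checks switch from "off + sizeof(...) > end" to size_add(), so a corrupted on-disk offset cannot wrap the addition and slip past the bounds check; size_add() saturates instead of overflowing. A userspace demonstration of the difference, using 32-bit values and __builtin_add_overflow() as a stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Saturating add, similar in spirit to the kernel's size_add() helper. */
static uint32_t size_add_sat(uint32_t a, uint32_t b)
{
	uint32_t sum;

	if (__builtin_add_overflow(a, b, &sum))
		return UINT32_MAX;	/* saturate instead of wrapping */
	return sum;
}

int main(void)
{
	uint32_t end = 4096;			/* size of the index buffer */
	uint32_t off = UINT32_MAX - 4;		/* corrupted on-disk offset */
	uint32_t entry_size = 16;

	/* Naive check: off + entry_size wraps to a tiny value, so it "fits". */
	printf("naive:     %s\n",
	       off + entry_size > end ? "rejected" : "accepted (bug)");

	/* Saturating check: the overflow is caught and the entry is rejected. */
	printf("saturated: %s\n",
	       size_add_sat(off, entry_size) > end ? "rejected" : "accepted");
	return 0;
}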
e = Add2Ptr(hdr, de_off); diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c index 6a0f6b0a3ab2..920a1ab47b63 100644 --- a/fs/ntfs3/super.c +++ b/fs/ntfs3/super.c @@ -555,6 +555,55 @@ static const struct proc_ops ntfs3_label_fops = { .proc_write = ntfs3_label_write, };
+static void ntfs_create_procdir(struct super_block *sb) +{ + struct proc_dir_entry *e; + + if (!proc_info_root) + return; + + e = proc_mkdir(sb->s_id, proc_info_root); + if (e) { + struct ntfs_sb_info *sbi = sb->s_fs_info; + + proc_create_data("volinfo", 0444, e, + &ntfs3_volinfo_fops, sb); + proc_create_data("label", 0644, e, + &ntfs3_label_fops, sb); + sbi->procdir = e; + } +} + +static void ntfs_remove_procdir(struct super_block *sb) +{ + struct ntfs_sb_info *sbi = sb->s_fs_info; + + if (!sbi->procdir) + return; + + remove_proc_entry("label", sbi->procdir); + remove_proc_entry("volinfo", sbi->procdir); + remove_proc_entry(sb->s_id, proc_info_root); + sbi->procdir = NULL; +} + +static void ntfs_create_proc_root(void) +{ + proc_info_root = proc_mkdir("fs/ntfs3", NULL); +} + +static void ntfs_remove_proc_root(void) +{ + if (proc_info_root) { + remove_proc_entry("fs/ntfs3", NULL); + proc_info_root = NULL; + } +} +#else +static void ntfs_create_procdir(struct super_block *sb) {} +static void ntfs_remove_procdir(struct super_block *sb) {} +static void ntfs_create_proc_root(void) {} +static void ntfs_remove_proc_root(void) {} #endif
static struct kmem_cache *ntfs_inode_cachep; @@ -644,15 +693,7 @@ static void ntfs_put_super(struct super_block *sb) { struct ntfs_sb_info *sbi = sb->s_fs_info;
-#ifdef CONFIG_PROC_FS - // Remove /proc/fs/ntfs3/.. - if (sbi->procdir) { - remove_proc_entry("label", sbi->procdir); - remove_proc_entry("volinfo", sbi->procdir); - remove_proc_entry(sb->s_id, proc_info_root); - sbi->procdir = NULL; - } -#endif + ntfs_remove_procdir(sb);
/* Mark rw ntfs as clear, if possible. */ ntfs_set_state(sbi, NTFS_DIRTY_CLEAR); @@ -1590,20 +1631,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc) kfree(boot2); }
-#ifdef CONFIG_PROC_FS - /* Create /proc/fs/ntfs3/.. */ - if (proc_info_root) { - struct proc_dir_entry *e = proc_mkdir(sb->s_id, proc_info_root); - static_assert((S_IRUGO | S_IWUSR) == 0644); - if (e) { - proc_create_data("volinfo", S_IRUGO, e, - &ntfs3_volinfo_fops, sb); - proc_create_data("label", S_IRUGO | S_IWUSR, e, - &ntfs3_label_fops, sb); - sbi->procdir = e; - } - } -#endif + ntfs_create_procdir(sb);
if (is_legacy_ntfs(sb)) sb->s_flags |= SB_RDONLY; @@ -1853,14 +1881,11 @@ static int __init init_ntfs_fs(void) if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS)) pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
-#ifdef CONFIG_PROC_FS - /* Create "/proc/fs/ntfs3" */ - proc_info_root = proc_mkdir("fs/ntfs3", NULL); -#endif + ntfs_create_proc_root();
err = ntfs3_init_bitmap(); if (err) - return err; + goto out2;
ntfs_inode_cachep = kmem_cache_create( "ntfs_inode_cache", sizeof(struct ntfs_inode), 0, @@ -1880,6 +1905,8 @@ static int __init init_ntfs_fs(void) kmem_cache_destroy(ntfs_inode_cachep); out1: ntfs3_exit_bitmap(); +out2: + ntfs_remove_proc_root(); return err; }
@@ -1890,11 +1917,7 @@ static void __exit exit_ntfs_fs(void) unregister_filesystem(&ntfs_fs_type); unregister_as_ntfs_legacy(); ntfs3_exit_bitmap(); - -#ifdef CONFIG_PROC_FS - if (proc_info_root) - remove_proc_entry("fs/ntfs3", NULL); -#endif + ntfs_remove_proc_root(); }
MODULE_LICENSE("GPL"); diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 4414743b638e..b8ac85b548c7 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -1803,6 +1803,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
el = root_el; while (el->l_tree_depth) { + if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) { + ocfs2_error(ocfs2_metadata_cache_get_super(ci), + "Owner %llu has invalid tree depth %u in extent list\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), + le16_to_cpu(el->l_tree_depth)); + ret = -EROFS; + goto out; + } if (le16_to_cpu(el->l_next_free_rec) == 0) { ocfs2_error(ocfs2_metadata_cache_get_super(ci), "Owner %llu has empty extent list at depth %u\n", diff --git a/fs/proc/base.c b/fs/proc/base.c index cd89e956c322..7feb8f41aa25 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -416,7 +416,7 @@ static const struct file_operations proc_pid_cmdline_ops = { #ifdef CONFIG_KALLSYMS /* * Provides a wchan file via kallsyms in a proper one-value-per-file format. - * Returns the resolved symbol. If that fails, simply return the address. + * Returns the resolved symbol to user space. */ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 73f93a35eedd..cb14a6828c50 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -300,6 +300,7 @@ cifs_abort_connection(struct TCP_Server_Info *server) server->ssocket->flags); sock_release(server->ssocket); server->ssocket = NULL; + put_net(cifs_net_ns(server)); } server->sequence_number = 0; server->session_estab = false; @@ -3123,8 +3124,12 @@ generic_ip_connect(struct TCP_Server_Info *server) /* * Grab netns reference for the socket. * - * It'll be released here, on error, or in clean_demultiplex_info() upon server - * teardown. + * This reference will be released in several situations: + * - In the failure path before the cifsd thread is started. + * - In the all place where server->socket is released, it is + * also set to NULL. + * - Ultimately in clean_demultiplex_info(), during the final + * teardown. */ get_net(net);
@@ -3140,10 +3145,8 @@ generic_ip_connect(struct TCP_Server_Info *server) }
rc = bind_socket(server); - if (rc < 0) { - put_net(cifs_net_ns(server)); + if (rc < 0) return rc; - }
/* * Eventually check for other socket options to change from @@ -3189,9 +3192,6 @@ generic_ip_connect(struct TCP_Server_Info *server) if (sport == htons(RFC1001_PORT)) rc = ip_rfc1001_connect(server);
- if (rc < 0) - put_net(cifs_net_ns(server)); - return rc; }
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c index 2a5b4a96bf99..83caa3849749 100644 --- a/fs/smb/server/auth.c +++ b/fs/smb/server/auth.c @@ -1016,9 +1016,9 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
ses_enc_key = enc ? sess->smb3encryptionkey : sess->smb3decryptionkey; - if (enc) - ksmbd_user_session_get(sess); memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); + if (!enc) + ksmbd_user_session_put(sess);
return 0; } @@ -1218,7 +1218,7 @@ int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov, free_sg: kfree(sg); free_req: - kfree(req); + aead_request_free(req); free_ctx: ksmbd_release_crypto_ctx(ctx); return rc; diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h index 91c2318639e7..14620e147dda 100644 --- a/fs/smb/server/connection.h +++ b/fs/smb/server/connection.h @@ -27,6 +27,7 @@ enum { KSMBD_SESS_EXITING, KSMBD_SESS_NEED_RECONNECT, KSMBD_SESS_NEED_NEGOTIATE, + KSMBD_SESS_NEED_SETUP, KSMBD_SESS_RELEASING };
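The error path above now releases the AEAD request with aead_request_free(), the release helper that pairs with aead_request_alloc() in the crypto API, rather than a bare kfree(). A minimal sketch of the pairing; do_one_request() is a made-up name and the actual scatterlist/IV setup is omitted:

#include <crypto/aead.h>

static int do_one_request(struct crypto_aead *tfm)
{
	struct aead_request *req;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* ... set up source/destination scatterlists, IV and callback ... */

	aead_request_free(req);		/* matches aead_request_alloc() */
	return 0;
}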
@@ -187,6 +188,11 @@ static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn) return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE; }
+static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn) +{ + return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP; +} + static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn) { return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT; @@ -217,6 +223,11 @@ static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn) WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE); }
+static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn) +{ + WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP); +} + static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn) { WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT); diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c index 71c6939dfbf1..3f45f28f6f0f 100644 --- a/fs/smb/server/mgmt/user_session.c +++ b/fs/smb/server/mgmt/user_session.c @@ -181,7 +181,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn) down_write(&sessions_table_lock); down_write(&conn->session_lock); xa_for_each(&conn->sessions, id, sess) { - if (atomic_read(&sess->refcnt) == 0 && + if (atomic_read(&sess->refcnt) <= 1 && (sess->state != SMB2_SESSION_VALID || time_after(jiffies, sess->last_active + SMB2_SESSION_TIMEOUT))) { @@ -230,7 +230,11 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) if (!ksmbd_chann_del(conn, sess) && xa_empty(&sess->ksmbd_chann_list)) { hash_del(&sess->hlist); - ksmbd_session_destroy(sess); + down_write(&conn->session_lock); + xa_erase(&conn->sessions, sess->id); + up_write(&conn->session_lock); + if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } } } @@ -249,13 +253,30 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) if (xa_empty(&sess->ksmbd_chann_list)) { xa_erase(&conn->sessions, sess->id); hash_del(&sess->hlist); - ksmbd_session_destroy(sess); + if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } } up_write(&conn->session_lock); up_write(&sessions_table_lock); }
+bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn, + unsigned long long id) +{ + struct ksmbd_session *sess; + + down_read(&conn->session_lock); + sess = xa_load(&conn->sessions, id); + if (sess) { + up_read(&conn->session_lock); + return true; + } + up_read(&conn->session_lock); + + return false; +} + struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id) { @@ -309,8 +330,8 @@ void ksmbd_user_session_put(struct ksmbd_session *sess)
if (atomic_read(&sess->refcnt) <= 0) WARN_ON(1); - else - atomic_dec(&sess->refcnt); + else if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); }
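ksmbd_user_session_put() now destroys the session when the last reference is dropped, and (further down) the refcount is initialised to 2 to cover both the creator and the session table: the usual "last put frees" idiom. A generic sketch of that pattern with made-up names, not ksmbd's own types:

#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
	atomic_t refcnt;
	/* ... payload ... */
};

static void obj_get(struct obj *o)
{
	atomic_inc(&o->refcnt);
}

static void obj_put(struct obj *o)
{
	/* Only the thread dropping the last reference frees the object. */
	if (atomic_dec_and_test(&o->refcnt))
		kfree(o);
}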
struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn, @@ -353,13 +374,13 @@ void destroy_previous_session(struct ksmbd_conn *conn, ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT); err = ksmbd_conn_wait_idle_sess_id(conn, id); if (err) { - ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE); + ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP); goto out; }
ksmbd_destroy_file_table(&prev_sess->file_table); prev_sess->state = SMB2_SESSION_EXPIRED; - ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE); + ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP); ksmbd_launch_ksmbd_durable_scavenger(); out: up_write(&conn->session_lock); @@ -417,7 +438,7 @@ static struct ksmbd_session *__session_create(int protocol) xa_init(&sess->rpc_handle_list); sess->sequence_number = 1; rwlock_init(&sess->tree_conns_lock); - atomic_set(&sess->refcnt, 1); + atomic_set(&sess->refcnt, 2);
ret = __init_smb2_session(sess); if (ret) diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h index c1c4b20bd5c6..f21348381d59 100644 --- a/fs/smb/server/mgmt/user_session.h +++ b/fs/smb/server/mgmt/user_session.h @@ -87,6 +87,8 @@ void ksmbd_session_destroy(struct ksmbd_session *sess); struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id); struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id); +bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn, + unsigned long long id); int ksmbd_session_register(struct ksmbd_conn *conn, struct ksmbd_session *sess); void ksmbd_sessions_deregister(struct ksmbd_conn *conn); diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 28886ff1ee57..f103b1bd0400 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -724,8 +724,8 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo) work->conn = conn; work->sess = opinfo->sess;
+ ksmbd_conn_r_count_inc(conn); if (opinfo->op_state == OPLOCK_ACK_WAIT) { - ksmbd_conn_r_count_inc(conn); INIT_WORK(&work->work, __smb2_oplock_break_noti); ksmbd_queue_work(work);
@@ -833,8 +833,8 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo) work->conn = conn; work->sess = opinfo->sess;
+ ksmbd_conn_r_count_inc(conn); if (opinfo->op_state == OPLOCK_ACK_WAIT) { - ksmbd_conn_r_count_inc(conn); INIT_WORK(&work->work, __smb2_lease_break_noti); ksmbd_queue_work(work); wait_for_break_ack(opinfo); @@ -1505,6 +1505,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req) if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) { struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
+ if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease_v2) - 4) + return NULL; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; @@ -1517,6 +1521,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req) } else { struct create_lease *lc = (struct create_lease *)cc;
+ if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease)) + return NULL; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index c53121538990..d24d95d15d87 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -1249,7 +1249,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work) }
conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode); - ksmbd_conn_set_need_negotiate(conn); + ksmbd_conn_set_need_setup(conn);
err_out: ksmbd_conn_unlock(conn); @@ -1271,6 +1271,9 @@ static int alloc_preauth_hash(struct ksmbd_session *sess, if (sess->Preauth_HashValue) return 0;
+ if (!conn->preauth_info) + return -ENOMEM; + sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue, PREAUTH_HASHVALUE_SIZE, KSMBD_DEFAULT_GFP); if (!sess->Preauth_HashValue) @@ -1674,6 +1677,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
ksmbd_debug(SMB, "Received smb2 session setup request\n");
+ if (!ksmbd_conn_need_setup(conn) && !ksmbd_conn_good(conn)) { + work->send_no_response = 1; + return rc; + } + WORK_BUFFERS(work, req, rsp);
rsp->StructureSize = cpu_to_le16(9); @@ -1707,44 +1715,38 @@ int smb2_sess_setup(struct ksmbd_work *work)
if (conn->dialect != sess->dialect) { rc = -EINVAL; - ksmbd_user_session_put(sess); goto out_err; }
if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) { rc = -EINVAL; - ksmbd_user_session_put(sess); goto out_err; }
if (strncmp(conn->ClientGUID, sess->ClientGUID, SMB2_CLIENT_GUID_SIZE)) { rc = -ENOENT; - ksmbd_user_session_put(sess); goto out_err; }
if (sess->state == SMB2_SESSION_IN_PROGRESS) { rc = -EACCES; - ksmbd_user_session_put(sess); goto out_err; }
if (sess->state == SMB2_SESSION_EXPIRED) { rc = -EFAULT; - ksmbd_user_session_put(sess); goto out_err; } - ksmbd_user_session_put(sess);
if (ksmbd_conn_need_reconnect(conn)) { rc = -EFAULT; + ksmbd_user_session_put(sess); sess = NULL; goto out_err; }
- sess = ksmbd_session_lookup(conn, sess_id); - if (!sess) { + if (is_ksmbd_session_in_connection(conn, sess_id)) { rc = -EACCES; goto out_err; } @@ -1910,10 +1912,12 @@ int smb2_sess_setup(struct ksmbd_work *work)
sess->last_active = jiffies; sess->state = SMB2_SESSION_EXPIRED; + ksmbd_user_session_put(sess); + work->sess = NULL; if (try_delay) { ksmbd_conn_set_need_reconnect(conn); ssleep(5); - ksmbd_conn_set_need_negotiate(conn); + ksmbd_conn_set_need_setup(conn); } } smb2_set_err_rsp(work); @@ -2239,14 +2243,15 @@ int smb2_session_logoff(struct ksmbd_work *work) return -ENOENT; }
- ksmbd_destroy_file_table(&sess->file_table); down_write(&conn->session_lock); sess->state = SMB2_SESSION_EXPIRED; up_write(&conn->session_lock);
- ksmbd_free_user(sess->user); - sess->user = NULL; - ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE); + if (sess->user) { + ksmbd_free_user(sess->user); + sess->user = NULL; + } + ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
rsp->StructureSize = cpu_to_le16(4); err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp)); @@ -2708,6 +2713,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; }
+ if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_reconn_v2_req)) { + err = -EINVAL; + goto out; + } + recon_v2 = (struct create_durable_reconn_v2_req *)context; persistent_id = recon_v2->Fid.PersistentFileId; dh_info->fp = ksmbd_lookup_durable_fd(persistent_id); @@ -2741,6 +2753,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; }
+ if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_reconn_req)) { + err = -EINVAL; + goto out; + } + recon = (struct create_durable_reconn_req *)context; persistent_id = recon->Data.Fid.PersistentFileId; dh_info->fp = ksmbd_lookup_durable_fd(persistent_id); @@ -2766,6 +2785,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; }
+ if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_req_v2)) { + err = -EINVAL; + goto out; + } + durable_v2_blob = (struct create_durable_req_v2 *)context; ksmbd_debug(SMB, "Request for durable v2 open\n"); diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c index 49b128698670..5aa7a66334d9 100644 --- a/fs/smb/server/smbacl.c +++ b/fs/smb/server/smbacl.c @@ -270,6 +270,11 @@ static int sid_to_id(struct mnt_idmap *idmap, return -EIO; }
+ if (psid->num_subauth == 0) { + pr_err("%s: zero subauthorities!\n", __func__); + return -EIO; + } + if (sidtype == SIDOWNER) { kuid_t uid; uid_t id; @@ -1026,7 +1031,9 @@ int smb_inherit_dacl(struct ksmbd_conn *conn, struct dentry *parent = path->dentry->d_parent; struct mnt_idmap *idmap = mnt_idmap(path->mnt); int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size; - int rc = 0, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size; + int rc = 0, pntsd_type, pntsd_size, acl_len, aces_size; + unsigned int dacloffset; + size_t dacl_struct_end; u16 num_aces, ace_cnt = 0; char *aces_base; bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode); @@ -1035,8 +1042,11 @@ int smb_inherit_dacl(struct ksmbd_conn *conn, parent, &parent_pntsd); if (pntsd_size <= 0) return -ENOENT; + dacloffset = le32_to_cpu(parent_pntsd->dacloffset); - if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) { + if (!dacloffset || + check_add_overflow(dacloffset, sizeof(struct smb_acl), &dacl_struct_end) || + dacl_struct_end > (size_t)pntsd_size) { rc = -EINVAL; goto free_parent_pntsd; } @@ -1240,7 +1250,9 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path, struct smb_ntsd *pntsd = NULL; struct smb_acl *pdacl; struct posix_acl *posix_acls; - int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset; + int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size; + unsigned int dacl_offset; + size_t dacl_struct_end; struct smb_sid sid; int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE); struct smb_ace *ace; @@ -1259,7 +1271,8 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
dacl_offset = le32_to_cpu(pntsd->dacloffset); if (!dacl_offset || - (dacl_offset + sizeof(struct smb_acl) > pntsd_size)) + check_add_overflow(dacl_offset, sizeof(struct smb_acl), &dacl_struct_end) || + dacl_struct_end > (size_t)pntsd_size) goto err_out;
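Both hunks above use check_add_overflow(), also from include/linux/overflow.h, so a wrapped DACL offset is reported explicitly instead of silently producing a small bogus end value. A small sketch of the shape of such a check; acl_header_in_bounds() is a hypothetical helper:

#include <linux/overflow.h>
#include <linux/types.h>

static bool acl_header_in_bounds(unsigned int dacl_offset, size_t header_size,
				 size_t pntsd_size)
{
	size_t dacl_struct_end;

	/* check_add_overflow() returns true when the addition wrapped. */
	if (check_add_overflow(dacl_offset, header_size, &dacl_struct_end))
		return false;

	return dacl_struct_end <= pntsd_size;
}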
pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset)); diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h index 8d0a6280e982..52b969c7cef9 100644 --- a/include/asm-generic/rwonce.h +++ b/include/asm-generic/rwonce.h @@ -79,10 +79,18 @@ unsigned long __read_once_word_nocheck(const void *addr) (typeof(x))__read_once_word_nocheck(&(x)); \ })
-static __no_kasan_or_inline +static __no_sanitize_or_inline unsigned long read_word_at_a_time(const void *addr) { + /* open-coded instrument_read(addr, 1) */ kasan_check_read(addr, 1); + kcsan_check_read(addr, 1); + + /* + * This load can race with concurrent stores to out-of-bounds memory, + * but READ_ONCE() can't be used because it requires higher alignment + * than plain loads in arm64 builds with LTO. + */ return *(unsigned long *)addr; }
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index e39de161c938..2cfe1d4bfc96 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -222,6 +222,13 @@ struct drm_dp_mst_branch { */ struct list_head destroy_next;
+ /** + * @rad: Relative Address of the MST branch. + * For &drm_dp_mst_topology_mgr.mst_primary, its rad[] entries are all 0, + * unset and unused. For MST branches connected after mst_primary, + * in each element of rad[] the nibbles are ordered by the most + * significant 4 bits first and the least significant 4 bits second. + */ u8 rad[8]; u8 lct; int num_ports; diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index ef817926cddd..94d365b22505 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -495,6 +495,11 @@ struct drm_memory_stats { enum drm_gem_object_status;
int drm_memory_stats_is_zero(const struct drm_memory_stats *stats); +void drm_fdinfo_print_size(struct drm_printer *p, + const char *prefix, + const char *stat, + const char *region, + u64 sz); void drm_print_memory_stats(struct drm_printer *p, const struct drm_memory_stats *stats, enum drm_gem_object_status supported_status, diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index 74169dd0f659..53f2837ce7df 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -176,6 +176,7 @@ void ffa_device_unregister(struct ffa_device *ffa_dev); int ffa_driver_register(struct ffa_driver *driver, struct module *owner, const char *mod_name); void ffa_driver_unregister(struct ffa_driver *driver); +void ffa_devices_unregister(void); bool ffa_device_is_valid(struct ffa_device *ffa_dev);
#else @@ -188,6 +189,8 @@ ffa_device_register(const struct ffa_partition_info *part_info,
static inline void ffa_device_unregister(struct ffa_device *dev) {}
+static inline void ffa_devices_unregister(void) {} + static inline int ffa_driver_register(struct ffa_driver *driver, struct module *owner, const char *mod_name) diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 13a11f3c09b8..aca06f300f83 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -1283,7 +1283,7 @@ struct virtchnl_proto_hdrs { * 2 - from the second inner layer * .... **/ - int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */ + u32 count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */ union { struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; @@ -1335,7 +1335,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
struct virtchnl_filter_action_set { /* action number must be less then VIRTCHNL_MAX_NUM_ACTIONS */ - int count; + u32 count; struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS]; };
diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h index 670f2dae692f..996493917f36 100644 --- a/include/linux/badblocks.h +++ b/include/linux/badblocks.h @@ -48,11 +48,11 @@ struct badblocks_context { int ack; };
-int badblocks_check(struct badblocks *bb, sector_t s, int sectors, - sector_t *first_bad, int *bad_sectors); -int badblocks_set(struct badblocks *bb, sector_t s, int sectors, - int acknowledged); -int badblocks_clear(struct badblocks *bb, sector_t s, int sectors); +int badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors, + sector_t *first_bad, sector_t *bad_sectors); +bool badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors, + int acknowledged); +bool badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors); void ack_all_badblocks(struct badblocks *bb); ssize_t badblocks_show(struct badblocks *bb, char *page, int unack); ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len, diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h index c50b5670c4a5..197916ee91a4 100644 --- a/include/linux/context_tracking_irq.h +++ b/include/linux/context_tracking_irq.h @@ -10,12 +10,12 @@ void ct_irq_exit_irqson(void); void ct_nmi_enter(void); void ct_nmi_exit(void); #else -static inline void ct_irq_enter(void) { } -static inline void ct_irq_exit(void) { } +static __always_inline void ct_irq_enter(void) { } +static __always_inline void ct_irq_exit(void) { } static inline void ct_irq_enter_irqson(void) { } static inline void ct_irq_exit_irqson(void) { } -static inline void ct_nmi_enter(void) { } -static inline void ct_nmi_exit(void) { } +static __always_inline void ct_nmi_enter(void) { } +static __always_inline void ct_nmi_exit(void) { } #endif
#endif diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 17276965ff1d..6ddcbb8be516 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -649,6 +649,10 @@ extern int coresight_enable_sysfs(struct coresight_device *csdev); extern void coresight_disable_sysfs(struct coresight_device *csdev); extern int coresight_timeout(struct csdev_access *csa, u32 offset, int position, int value); +typedef void (*coresight_timeout_cb_t) (struct csdev_access *, u32, int, int); +extern int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb);
extern int coresight_claim_device(struct coresight_device *csdev); extern int coresight_claim_device_unlocked(struct coresight_device *csdev); diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 835e7b793f6a..5466c96a33db 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -125,9 +125,11 @@ static inline int cpuset_do_page_mem_spread(void)
extern bool current_cpuset_is_being_rebound(void);
+extern void dl_rebuild_rd_accounting(void); extern void rebuild_sched_domains(void);
extern void cpuset_print_current_mems_allowed(void); +extern void cpuset_reset_sched_domains(void);
/* * read_mems_allowed_begin is required when making decisions involving @@ -259,11 +261,20 @@ static inline bool current_cpuset_is_being_rebound(void) return false; }
+static inline void dl_rebuild_rd_accounting(void) +{ +} + static inline void rebuild_sched_domains(void) { partition_sched_domains(1, NULL, NULL); }
+static inline void cpuset_reset_sched_domains(void) +{ + partition_sched_domains(1, NULL, NULL); +} + static inline void cpuset_print_current_mems_allowed(void) { } diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index d7e30d4f7503..f3bc0bcd7098 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -78,14 +78,18 @@ static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map) #define phys_to_dma_unencrypted phys_to_dma #endif #else -static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev, - phys_addr_t paddr) +static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) { if (dev->dma_range_map) return translate_phys_to_dma(dev, paddr); return paddr; }
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev, + phys_addr_t paddr) +{ + return dma_addr_unencrypted(__phys_to_dma(dev, paddr)); +} /* * If memory encryption is supported, phys_to_dma will set the memory encryption * bit in the DMA address, and dma_to_phys will clear it. @@ -94,19 +98,20 @@ static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev, */ static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { - return __sme_set(phys_to_dma_unencrypted(dev, paddr)); + return dma_addr_encrypted(__phys_to_dma(dev, paddr)); }
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) { phys_addr_t paddr;
+ dma_addr = dma_addr_canonical(dma_addr); if (dev->dma_range_map) paddr = translate_dma_to_phys(dev, dma_addr); else paddr = dma_addr;
- return __sme_clr(paddr); + return paddr; } #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 0731994b9d7c..6fa0a268d538 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -91,7 +91,7 @@ struct fwnode_endpoint { #define SWNODE_GRAPH_PORT_NAME_FMT "port@%u" #define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
-#define NR_FWNODE_REFERENCE_ARGS 8 +#define NR_FWNODE_REFERENCE_ARGS 16
/** * struct fwnode_reference_args - Fwnode reference with additional arguments diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 3ff96ae31bf6..c5fe3b2a53e8 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -65,11 +65,9 @@ struct br_ip_list { #define BR_DEFAULT_AGEING_TIME (300 * HZ)
struct net_bridge; -void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, - unsigned int cmd, struct ifreq *ifr, +void brioctl_set(int (*hook)(struct net *net, unsigned int cmd, void __user *uarg)); -int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg); +int br_ioctl_call(struct net *net, unsigned int cmd, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h index e5de7a124bad..66f830ab9b49 100644 --- a/include/linux/iio/iio-gts-helper.h +++ b/include/linux/iio/iio-gts-helper.h @@ -208,5 +208,6 @@ int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type, int *length); int iio_gts_avail_scales_for_time(struct iio_gts *gts, int time, const int **vals, int *type, int *length); +int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time);
#endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 56161e02f002..5ed03e36178f 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -10,6 +10,7 @@ #include <linux/device.h> #include <linux/cdev.h> #include <linux/cleanup.h> +#include <linux/compiler_types.h> #include <linux/slab.h> #include <linux/iio/types.h> /* IIO TODO LIST */ @@ -662,6 +663,31 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); int iio_device_claim_direct_mode(struct iio_dev *indio_dev); void iio_device_release_direct_mode(struct iio_dev *indio_dev);
+/* + * Helper functions that allow claim and release of direct mode + * in a fashion that doesn't generate many false positives from sparse. + * Note this must remain static inline in the header so that sparse + * can see the __acquire() marking. Revisit when sparse supports + * __cond_acquires() + */ +static inline bool iio_device_claim_direct(struct iio_dev *indio_dev) +{ + int ret = iio_device_claim_direct_mode(indio_dev); + + if (ret) + return false; + + __acquire(iio_dev); + + return true; +} + +static inline void iio_device_release_direct(struct iio_dev *indio_dev) +{ + iio_device_release_direct_mode(indio_dev); + __release(indio_dev); +} + /* * This autocleanup logic is normally used via * iio_device_claim_direct_scoped(). diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 8cd9327e4e78..a1b1be9bf73b 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -448,7 +448,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, static inline void disable_irq_nosync_lockdep(unsigned int irq) { disable_irq_nosync(irq); -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_disable(); #endif } @@ -456,7 +456,7 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq) static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) { disable_irq_nosync(irq); -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_save(*flags); #endif } @@ -471,7 +471,7 @@ static inline void disable_irq_lockdep(unsigned int irq)
static inline void enable_irq_lockdep(unsigned int irq) { -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_enable(); #endif enable_irq(irq); @@ -479,7 +479,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) { -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_restore(*flags); #endif enable_irq(irq); diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h index ae4526389261..07584c5e36fb 100644 --- a/include/linux/mem_encrypt.h +++ b/include/linux/mem_encrypt.h @@ -26,11 +26,34 @@ */ #define __sme_set(x) ((x) | sme_me_mask) #define __sme_clr(x) ((x) & ~sme_me_mask) + +#define dma_addr_encrypted(x) __sme_set(x) +#define dma_addr_canonical(x) __sme_clr(x) + #else #define __sme_set(x) (x) #define __sme_clr(x) (x) #endif
+/* + * dma_addr_encrypted() and dma_addr_unencrypted() are for converting a given DMA + * address to the respective type of addressing. + * + * dma_addr_canonical() is used to reverse any conversions for encrypted/decrypted + * back to the canonical address. + */ +#ifndef dma_addr_encrypted +#define dma_addr_encrypted(x) (x) +#endif + +#ifndef dma_addr_unencrypted +#define dma_addr_unencrypted(x) (x) +#endif + +#ifndef dma_addr_canonical +#define dma_addr_canonical(x) (x) +#endif + #endif /* __ASSEMBLY__ */
#endif /* __MEM_ENCRYPT_H__ */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index f00bfcee7120..108862d81b57 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -250,6 +250,10 @@ struct nfs_server { struct list_head ss_copies; struct list_head ss_src_copies;
+ unsigned long delegation_flags; +#define NFS4SERV_DELEGRETURN (1) +#define NFS4SERV_DELEGATION_EXPIRED (2) +#define NFS4SERV_DELEGRETURN_DELAYED (3) unsigned long delegation_gen; unsigned long mig_gen; unsigned long mig_status; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a8dfb38c9bb6..e78fa535f61d 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -17,7 +17,6 @@ void lockup_detector_init(void); void lockup_detector_retry_init(void); void lockup_detector_soft_poweroff(void); -void lockup_detector_cleanup(void);
extern int watchdog_user_enabled; extern int watchdog_thresh; @@ -37,7 +36,6 @@ extern int sysctl_hardlockup_all_cpu_backtrace; static inline void lockup_detector_init(void) { } static inline void lockup_detector_retry_init(void) { } static inline void lockup_detector_soft_poweroff(void) { } -static inline void lockup_detector_cleanup(void) { } #endif /* !CONFIG_LOCKUP_DETECTOR */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR @@ -104,12 +102,10 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs); #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) extern void hardlockup_detector_perf_stop(void); extern void hardlockup_detector_perf_restart(void); -extern void hardlockup_detector_perf_cleanup(void); extern void hardlockup_config_perf_event(const char *str); #else static inline void hardlockup_detector_perf_stop(void) { } static inline void hardlockup_detector_perf_restart(void) { } -static inline void hardlockup_detector_perf_cleanup(void) { } static inline void hardlockup_config_perf_event(const char *str) { } #endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8333f132f4a9..bcb764c3a803 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -495,7 +495,7 @@ struct pmu { * context-switches callback */ void (*sched_task) (struct perf_event_pmu_context *pmu_ctx, - bool sched_in); + struct task_struct *task, bool sched_in);
/* * Kmem cache of PMU specific data @@ -1020,6 +1020,41 @@ struct perf_event_context { local_t nr_no_switch_fast; };
+/** + * struct perf_ctx_data - PMU specific data for a task + * @rcu_head: To avoid the race on free PMU specific data + * @refcount: To track users + * @global: To track system-wide users + * @ctx_cache: Kmem cache of PMU specific data + * @data: PMU specific data + * + * Currently, the struct is only used in Intel LBR call stack mode to + * save/restore the call stack of a task on context switches. + * + * The rcu_head is used to prevent the race on free the data. + * The data only be allocated when Intel LBR call stack mode is enabled. + * The data will be freed when the mode is disabled. + * The content of the data will only be accessed in context switch, which + * should be protected by rcu_read_lock(). + * + * Because of the alignment requirement of Intel Arch LBR, the Kmem cache + * is used to allocate the PMU specific data. The ctx_cache is to track + * the Kmem cache. + * + * Careful: Struct perf_ctx_data is added as a pointer in struct task_struct. + * When system-wide Intel LBR call stack mode is enabled, a buffer with + * constant size will be allocated for each task. + * Also, system memory consumption can further grow when the size of + * struct perf_ctx_data enlarges. + */ +struct perf_ctx_data { + struct rcu_head rcu_head; + refcount_t refcount; + int global; + struct kmem_cache *ctx_cache; + void *data; +}; + struct perf_cpu_pmu_context { struct perf_event_pmu_context epc; struct perf_event_pmu_context *task_epc; diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 94d267d02372..4c107e17c547 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1508,14 +1508,25 @@ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, }
/* - * track_pfn_copy is called when vma that is covering the pfnmap gets - * copied through copy_page_range(). + * track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page + * tables copied during copy_page_range(). On success, stores the pfn to be + * passed to untrack_pfn_copy(). */ -static inline int track_pfn_copy(struct vm_area_struct *vma) +static inline int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn) { return 0; }
+/* + * untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during + * copy_page_range(), but after track_pfn_copy() was already called. + */ +static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma, + unsigned long pfn) +{ +} + /* * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or @@ -1528,8 +1539,10 @@ static inline void untrack_pfn(struct vm_area_struct *vma, }
/* - * untrack_pfn_clear is called while mremapping a pfnmap for a new region - * or fails to copy pgtable during duplicate vm area. + * untrack_pfn_clear is called in the following cases on a VM_PFNMAP VMA: + * + * 1) During mremap() on the src VMA after the page tables were moved. + * 2) During fork() on the dst VMA, immediately after duplicating the src VMA. */ static inline void untrack_pfn_clear(struct vm_area_struct *vma) { @@ -1540,7 +1553,10 @@ extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long size); extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn); -extern int track_pfn_copy(struct vm_area_struct *vma); +extern int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn); +extern void untrack_pfn_copy(struct vm_area_struct *dst_vma, + unsigned long pfn); extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size, bool mm_wr_locked); extern void untrack_pfn_clear(struct vm_area_struct *vma); diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index d39dc863f612..d0b29cd1fd20 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -66,6 +66,7 @@ static inline bool queue_pm_work(struct work_struct *work)
extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); +extern bool pm_runtime_need_not_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev);
@@ -241,6 +242,7 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; }
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; } static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 48e5c03df1dd..bd69ddc102fb 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -138,7 +138,7 @@ static inline void rcu_sysrq_end(void) { } #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) void rcu_irq_work_resched(void); #else -static inline void rcu_irq_work_resched(void) { } +static __always_inline void rcu_irq_work_resched(void) { } #endif
#ifdef CONFIG_RCU_NOCB_CPU diff --git a/include/linux/reboot.h b/include/linux/reboot.h index abcdde4df697..e97f6b8e8586 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -177,16 +177,28 @@ void ctrl_alt_del(void);
extern void orderly_poweroff(bool force); extern void orderly_reboot(void); -void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown); + +/** + * enum hw_protection_action - Hardware protection action + * + * @HWPROT_ACT_SHUTDOWN: + * The system should be shut down (powered off) for HW protection. + * @HWPROT_ACT_REBOOT: + * The system should be rebooted for HW protection. + */ +enum hw_protection_action { HWPROT_ACT_SHUTDOWN, HWPROT_ACT_REBOOT }; + +void __hw_protection_shutdown(const char *reason, int ms_until_forced, + enum hw_protection_action action);
static inline void hw_protection_reboot(const char *reason, int ms_until_forced) { - __hw_protection_shutdown(reason, ms_until_forced, false); + __hw_protection_shutdown(reason, ms_until_forced, HWPROT_ACT_REBOOT); }
static inline void hw_protection_shutdown(const char *reason, int ms_until_forced) { - __hw_protection_shutdown(reason, ms_until_forced, true); + __hw_protection_shutdown(reason, ms_until_forced, HWPROT_ACT_SHUTDOWN); }
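Callers keep using the same two wrappers; only the third argument of __hw_protection_shutdown() changes from a bool to the new enum. A hedged example of how a driver might trip hardware protection; handle_critical_temp() and the 5 second grace period are made up:

#include <linux/reboot.h>

static void handle_critical_temp(bool reboot_is_sufficient)
{
	/* Give an orderly shutdown/reboot 5000 ms before it is forced. */
	if (reboot_is_sufficient)
		hw_protection_reboot("temperature above critical trip point", 5000);
	else
		hw_protection_shutdown("temperature above critical trip point", 5000);
}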
/* diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c15365a30c0..6e5c38718ff5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -65,6 +65,7 @@ struct mempolicy; struct nameidata; struct nsproxy; struct perf_event_context; +struct perf_ctx_data; struct pid_namespace; struct pipe_inode_info; struct rcu_node; @@ -382,6 +383,11 @@ enum uclamp_id { #ifdef CONFIG_SMP extern struct root_domain def_root_domain; extern struct mutex sched_domains_mutex; +extern void sched_domains_mutex_lock(void); +extern void sched_domains_mutex_unlock(void); +#else +static inline void sched_domains_mutex_lock(void) { } +static inline void sched_domains_mutex_unlock(void) { } #endif
struct sched_param { @@ -1311,6 +1317,7 @@ struct task_struct { struct perf_event_context *perf_event_ctxp; struct mutex perf_event_mutex; struct list_head perf_event_list; + struct perf_ctx_data __rcu *perf_ctx_data; #endif #ifdef CONFIG_DEBUG_PREEMPT unsigned long preempt_disable_ip; diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 3a912ab42bb5..f9aabbc9d22e 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -34,7 +34,11 @@ static inline bool dl_time_before(u64 a, u64 b) struct root_domain; extern void dl_add_task_root_domain(struct task_struct *p); extern void dl_clear_root_domain(struct root_domain *rd); +extern void dl_clear_root_domain_cpu(int cpu);
#endif /* CONFIG_SMP */
+extern u64 dl_cookie; +extern bool dl_bw_visited(int cpu, u64 cookie); + #endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h index fb1e295e7e63..166b19af956f 100644 --- a/include/linux/sched/smt.h +++ b/include/linux/sched/smt.h @@ -12,7 +12,7 @@ static __always_inline bool sched_smt_active(void) return static_branch_likely(&sched_smt_present); } #else -static inline bool sched_smt_active(void) { return false; } +static __always_inline bool sched_smt_active(void) { return false; } #endif
void arch_smt_update(void); diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index e45531455d3b..d55949071c30 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -22,8 +22,9 @@ #include <linux/atomic.h> #include <asm/seccomp.h>
-#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER extern int __secure_computing(const struct seccomp_data *sd); + +#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER static inline int secure_computing(void) { if (unlikely(test_syscall_work(SECCOMP))) @@ -32,11 +33,6 @@ static inline int secure_computing(void) } #else extern void secure_computing_strict(int this_syscall); -static inline int __secure_computing(const struct seccomp_data *sd) -{ - secure_computing_strict(sd->nr); - return 0; -} #endif
extern long prctl_get_seccomp(void); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 69f9bedd0ee8..0b5ed6821080 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -86,8 +86,6 @@ struct thermal_trip { #define THERMAL_TRIP_PRIV_TO_INT(_val_) (uintptr_t)(_val_) #define THERMAL_INT_TO_TRIP_PRIV(_val_) (void *)(uintptr_t)(_val_)
-struct thermal_zone_device; - struct cooling_spec { unsigned long upper; /* Highest cooling state */ unsigned long lower; /* Lowest cooling state */ diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index b1df7d792fa1..a6bec560bdbc 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -39,6 +39,8 @@ struct page;
#define MAX_URETPROBE_DEPTH 64
+#define UPROBE_NO_TRAMPOLINE_VADDR (~0UL) + struct uprobe_consumer { /* * handler() can return UPROBE_HANDLER_REMOVE to signal the need to diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d11b903c2edb..58bda3347914 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -313,6 +313,30 @@ static inline void cgroup_writeback_umount(struct super_block *sb) /* * mm/page-writeback.c */ +/* consolidated parameters for balance_dirty_pages() and its subroutines */ +struct dirty_throttle_control { +#ifdef CONFIG_CGROUP_WRITEBACK + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */ +#endif + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + + unsigned long avail; /* dirtyable */ + unsigned long dirty; /* file_dirty + write + nfs */ + unsigned long thresh; /* dirty threshold */ + unsigned long bg_thresh; /* dirty background threshold */ + unsigned long limit; /* hard dirty limit */ + + unsigned long wb_dirty; /* per-wb counterparts */ + unsigned long wb_thresh; + unsigned long wb_bg_thresh; + + unsigned long pos_ratio; + bool freerun; + bool dirty_exceeded; +}; + void laptop_io_completion(struct backing_dev_info *info); void laptop_sync_completion(void); void laptop_mode_timer_fn(struct timer_list *t); diff --git a/include/net/ax25.h b/include/net/ax25.h index 4ee141aae0a2..a7bba42dde15 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -418,7 +418,6 @@ void ax25_rt_device_down(struct net_device *); int ax25_rt_ioctl(unsigned int, void __user *); extern const struct seq_operations ax25_rt_seqops; ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev); -int ax25_rt_autobind(ax25_cb *, ax25_address *); struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *); void ax25_rt_free(void); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 3ec915738112..a8586c3058c7 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -208,6 +208,13 @@ enum { */ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
+ /* When this quirk is set consider Sync Flow Control as supported by + * the driver. + * + * This quirk must be set before hci_register_dev is called. + */ + HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, + /* When this quirk is set, the LE states reported through the * HCI_LE_READ_SUPPORTED_STATES are invalid/broken. * @@ -354,6 +361,22 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, + + /* When this quirk is set, the HCI_OP_READ_VOICE_SETTING command is + * skipped. This is required for a subset of the CSR controller clones + * which erroneously claim to support it. + * + * This quirk must be set before hci_register_dev is called. + */ + HCI_QUIRK_BROKEN_READ_VOICE_SETTING, + + /* When this quirk is set, the HCI_OP_READ_PAGE_SCAN_TYPE command is + * skipped. This is required for a subset of the CSR controller clones + * which erroneously claim to support it. + * + * This quirk must be set before hci_register_dev is called. + */ + HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, };
/* HCI device flags */ @@ -432,6 +455,7 @@ enum { HCI_WIDEBAND_SPEECH_ENABLED, HCI_EVENT_FILTER_CONFIGURED, HCI_PA_SYNC, + HCI_SCO_FLOWCTL,
HCI_DUT_MODE, HCI_VENDOR_DIAG, @@ -855,6 +879,11 @@ struct hci_cp_remote_name_req_cancel { bdaddr_t bdaddr; } __packed;
+struct hci_rp_remote_name_req_cancel { + __u8 status; + bdaddr_t bdaddr; +} __packed; + #define HCI_OP_READ_REMOTE_FEATURES 0x041b struct hci_cp_read_remote_features { __le16 handle; @@ -1528,6 +1557,11 @@ struct hci_rp_read_tx_power { __s8 tx_power; } __packed;
+#define HCI_OP_WRITE_SYNC_FLOWCTL 0x0c2f +struct hci_cp_write_sync_flowctl { + __u8 enable; +} __packed; + #define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46 struct hci_rp_read_page_scan_type { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6281063cbd8e..f0b49aad519e 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1858,6 +1858,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD) #define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF) #define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK) +#define lmp_sco_capable(dev) ((dev)->features[0][1] & LMP_SCO) #define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ) #define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO) #define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR)) @@ -1925,6 +1926,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); ((dev)->commands[20] & 0x10 && \ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))
+#define read_voice_setting_capable(dev) \ + ((dev)->commands[9] & 0x04 && \ + !test_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &(dev)->quirks)) + /* Use enhanced synchronous connection if command is supported and its quirk * has not been set. */ diff --git a/include/net/bonding.h b/include/net/bonding.h index 8bb5f016969f..95f67b308c19 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -695,6 +695,7 @@ void bond_debug_register(struct bonding *bond); void bond_debug_unregister(struct bonding *bond); void bond_debug_reregister(struct bonding *bond); const char *bond_mode_name(int mode); +bool bond_xdp_check(struct bonding *bond, int mode); void bond_setup(struct net_device *bond_dev); unsigned int bond_get_num_tx_queues(void); int bond_netlink_init(void); diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index bfe625b55d55..a58ae7589d12 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -110,11 +110,16 @@ struct xdp_sock { * indicates position where checksumming should start. * csum_offset indicates position where checksum should be stored. * + * void (*tmo_request_launch_time)(u64 launch_time, void *priv) + * Called when AF_XDP frame requested launch time HW offload support. + * launch_time indicates the PTP time at which the device can schedule the + * packet for transmission. */ struct xsk_tx_metadata_ops { void (*tmo_request_timestamp)(void *priv); u64 (*tmo_fill_timestamp)(void *priv); void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv); + void (*tmo_request_launch_time)(u64 launch_time, void *priv); };
#ifdef CONFIG_XDP_SOCKETS @@ -162,6 +167,11 @@ static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta, if (!meta) return;
+ if (ops->tmo_request_launch_time) + if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME) + ops->tmo_request_launch_time(meta->request.launch_time, + priv); + if (ops->tmo_request_timestamp) if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP) ops->tmo_request_timestamp(priv); diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 784cd34f5bba..fe4afb251719 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -199,6 +199,7 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr) #define XDP_TXMD_FLAGS_VALID ( \ XDP_TXMD_FLAGS_TIMESTAMP | \ XDP_TXMD_FLAGS_CHECKSUM | \ + XDP_TXMD_FLAGS_LAUNCH_TIME | \ 0)
static inline bool diff --git a/include/net/xfrm.h b/include/net/xfrm.h index ed4b83696c77..e1eed5d47d07 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -464,6 +464,15 @@ struct xfrm_type_offload {
int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family); +void xfrm_set_type_offload(struct xfrm_state *x); +static inline void xfrm_unset_type_offload(struct xfrm_state *x) +{ + if (!x->type_offload) + return; + + module_put(x->type_offload->owner); + x->type_offload = NULL; +}
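xfrm_set_type_offload() takes a reference on the module that provides the offload type, and the new inline helper drops it again via module_put(). A generic sketch of that ownership pattern; struct some_ops and the helper names are illustrative, not xfrm's:

#include <linux/module.h>

struct some_ops {
	struct module *owner;
	/* ... function pointers ... */
};

/* Pin the module providing ops so it cannot be unloaded while in use. */
static const struct some_ops *grab_ops(const struct some_ops *ops)
{
	if (!try_module_get(ops->owner))
		return NULL;
	return ops;
}

static void drop_ops(const struct some_ops *ops)
{
	module_put(ops->owner);
}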
/** * struct xfrm_mode_cbs - XFRM mode callbacks @@ -1760,7 +1769,7 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack); u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); -int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, +int __xfrm_init_state(struct xfrm_state *x, bool init_replay, struct netlink_ext_ack *extack); int xfrm_init_state(struct xfrm_state *x); int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 0ad104dae253..43954bb0475a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2750,6 +2750,7 @@ struct ib_device { * It is a NULL terminated array. */ const struct attribute_group *groups[4]; + u8 hw_stats_attr_index;
u64 uverbs_cmd_mask;
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index e1c1079f8c8d..ed52d0506c69 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -76,6 +76,10 @@ #define DECLARE_TRACE(name, proto, args) \ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
+#undef DECLARE_TRACE_CONDITION +#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ + DEFINE_TRACE(name, PARAMS(proto), PARAMS(args)) + /* If requested, create helpers for calling these tracepoints from Rust. */ #ifdef CREATE_RUST_TRACE_POINTS #undef DEFINE_RUST_DO_TRACE @@ -108,6 +112,8 @@ /* Make all open coded DECLARE_TRACE nops */ #undef DECLARE_TRACE #define DECLARE_TRACE(name, proto, args) +#undef DECLARE_TRACE_CONDITION +#define DECLARE_TRACE_CONDITION(name, proto, args, cond)
#ifdef TRACEPOINTS_ENABLED #include <trace/trace_events.h> @@ -129,6 +135,7 @@ #undef DEFINE_EVENT_CONDITION #undef TRACE_HEADER_MULTI_READ #undef DECLARE_TRACE +#undef DECLARE_TRACE_CONDITION
/* Only undef what we defined in this file */ #ifdef UNDEF_TRACE_INCLUDE_FILE diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index a261e86e61fa..5755c2a569e1 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -629,11 +629,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, TRACE_EVENT(balance_dirty_pages,
TP_PROTO(struct bdi_writeback *wb, - unsigned long thresh, - unsigned long bg_thresh, - unsigned long dirty, - unsigned long bdi_thresh, - unsigned long bdi_dirty, + struct dirty_throttle_control *dtc, unsigned long dirty_ratelimit, unsigned long task_ratelimit, unsigned long dirtied, @@ -641,7 +637,7 @@ TRACE_EVENT(balance_dirty_pages, long pause, unsigned long start_time),
- TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, + TP_ARGS(wb, dtc, dirty_ratelimit, task_ratelimit, dirtied, period, pause, start_time),
@@ -664,16 +660,15 @@ TRACE_EVENT(balance_dirty_pages, ),
TP_fast_assign( - unsigned long freerun = (thresh + bg_thresh) / 2; + unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2; strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
- __entry->limit = global_wb_domain.dirty_limit; - __entry->setpoint = (global_wb_domain.dirty_limit + - freerun) / 2; - __entry->dirty = dirty; + __entry->limit = dtc->limit; + __entry->setpoint = (dtc->limit + freerun) / 2; + __entry->dirty = dtc->dirty; __entry->bdi_setpoint = __entry->setpoint * - bdi_thresh / (thresh + 1); - __entry->bdi_dirty = bdi_dirty; + dtc->wb_thresh / (dtc->thresh + 1); + __entry->bdi_dirty = dtc->wb_dirty; __entry->dirty_ratelimit = KBps(dirty_ratelimit); __entry->task_ratelimit = KBps(task_ratelimit); __entry->dirtied = dirtied; diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index 42ec5ddaab8d..42869770776e 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -127,6 +127,12 @@ struct xdp_options { */ #define XDP_TXMD_FLAGS_CHECKSUM (1 << 1)
+/* Request launch time hardware offload. The device will schedule the packet for + * transmission at a pre-determined time called launch time. The value of + * launch time is communicated via the launch_time field of struct xsk_tx_metadata. + */ +#define XDP_TXMD_FLAGS_LAUNCH_TIME (1 << 2) + /* AF_XDP offloads request. 'request' union member is consumed by the driver * when the packet is being transmitted. 'completion' union member is * filled by the driver when the transmit completion arrives. @@ -142,6 +148,10 @@ struct xsk_tx_metadata { __u16 csum_start; /* Offset from csum_start where checksum should be stored. */ __u16 csum_offset; + + /* XDP_TXMD_FLAGS_LAUNCH_TIME */ + /* Launch time in nanoseconds against the PTP HW Clock */ + __u64 launch_time; } request;
struct { diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index e4be227d3ad6..4324e89a8026 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -59,10 +59,13 @@ enum netdev_xdp_rx_metadata { * by the driver. * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the * driver. + * @NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO: Launch time HW offload is supported + * by the driver. */ enum netdev_xsk_flags { NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, + NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO = 4, };
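A driver advertises the capability through NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO, and user space requests it per frame by setting the new metadata flag. A rough user-space sketch with socket and ring setup omitted; request_launch_time() is a hypothetical helper:

#include <linux/if_xdp.h>

/*
 * meta points at the xsk_tx_metadata area reserved in front of the frame;
 * launch_ns is the desired transmission time on the PTP hardware clock.
 */
static void request_launch_time(struct xsk_tx_metadata *meta, __u64 launch_ns)
{
	meta->flags |= XDP_TXMD_FLAGS_LAUNCH_TIME;
	meta->request.launch_time = launch_ns;
}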
enum netdev_queue_type { diff --git a/init/Kconfig b/init/Kconfig index 324c2886b2ea..5ab47c346ef9 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -129,6 +129,11 @@ config CC_HAS_COUNTED_BY # https://github.com/llvm/llvm-project/pull/112636 depends on !(CC_IS_CLANG && CLANG_VERSION < 190103)
+config LD_CAN_USE_KEEP_IN_OVERLAY + # ld.lld prior to 21.0.0 did not support KEEP within an overlay description + # https://github.com/llvm/llvm-project/pull/130661 + def_bool LD_IS_BFD || LLD_VERSION >= 210000 + config RUSTC_HAS_COERCE_POINTEE def_bool RUSTC_VERSION >= 108400
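For reference on the include/uapi/linux/if_xdp.h change above: a minimal userspace sketch of requesting launch-time offload for one AF_XDP TX descriptor. It assumes the usual xsk TX-metadata convention (socket created with a nonzero tx_metadata_len, struct xsk_tx_metadata placed immediately before the packet data in the umem frame, XDP_TX_METADATA set in the descriptor options); the helper name is invented for illustration.

    #include <linux/if_xdp.h>
    #include <stdint.h>

    static void request_launch_time(void *pkt_data, struct xdp_desc *desc,
                                    uint64_t launch_ns)
    {
            /* metadata sits directly in front of the packet in the umem frame */
            struct xsk_tx_metadata *meta =
                    (struct xsk_tx_metadata *)pkt_data - 1;

            meta->flags = XDP_TXMD_FLAGS_LAUNCH_TIME;
            /* nanoseconds against the NIC's PTP hardware clock */
            meta->request.launch_time = launch_ns;

            desc->options |= XDP_TX_METADATA;
    }
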
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index 91019b4d0308..24f06fba4309 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -160,9 +160,9 @@ static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound) }
static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq, - struct io_wq_work *work) + unsigned int work_flags) { - return io_get_acct(wq, !(atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND)); + return io_get_acct(wq, !(work_flags & IO_WQ_WORK_UNBOUND)); }
static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker) @@ -452,9 +452,14 @@ static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker) } }
+static inline unsigned int __io_get_work_hash(unsigned int work_flags) +{ + return work_flags >> IO_WQ_HASH_SHIFT; +} + static inline unsigned int io_get_work_hash(struct io_wq_work *work) { - return atomic_read(&work->flags) >> IO_WQ_HASH_SHIFT; + return __io_get_work_hash(atomic_read(&work->flags)); }
static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash) @@ -484,17 +489,19 @@ static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct, struct io_wq *wq = worker->wq;
wq_list_for_each(node, prev, &acct->work_list) { + unsigned int work_flags; unsigned int hash;
work = container_of(node, struct io_wq_work, list);
/* not hashed, can run anytime */ - if (!io_wq_is_hashed(work)) { + work_flags = atomic_read(&work->flags); + if (!__io_wq_is_hashed(work_flags)) { wq_list_del(&acct->work_list, node, prev); return work; }
- hash = io_get_work_hash(work); + hash = __io_get_work_hash(work_flags); /* all items with this hash lie in [work, tail] */ tail = wq->hash_tail[hash];
@@ -591,12 +598,15 @@ static void io_worker_handle_work(struct io_wq_acct *acct, /* handle a whole dependent link */ do { struct io_wq_work *next_hashed, *linked; - unsigned int hash = io_get_work_hash(work); + unsigned int work_flags = atomic_read(&work->flags); + unsigned int hash = __io_wq_is_hashed(work_flags) + ? __io_get_work_hash(work_flags) + : -1U;
next_hashed = wq_next_work(work);
if (do_kill && - (atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND)) + (work_flags & IO_WQ_WORK_UNBOUND)) atomic_or(IO_WQ_WORK_CANCEL, &work->flags); wq->do_work(work); io_assign_current_work(worker, NULL); @@ -916,19 +926,19 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq) } while (work); }
-static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work) +static void io_wq_insert_work(struct io_wq *wq, struct io_wq_acct *acct, + struct io_wq_work *work, unsigned int work_flags) { - struct io_wq_acct *acct = io_work_get_acct(wq, work); unsigned int hash; struct io_wq_work *tail;
- if (!io_wq_is_hashed(work)) { + if (!__io_wq_is_hashed(work_flags)) { append: wq_list_add_tail(&work->list, &acct->work_list); return; }
- hash = io_get_work_hash(work); + hash = __io_get_work_hash(work_flags); tail = wq->hash_tail[hash]; wq->hash_tail[hash] = work; if (!tail) @@ -944,8 +954,8 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) { - struct io_wq_acct *acct = io_work_get_acct(wq, work); unsigned int work_flags = atomic_read(&work->flags); + struct io_wq_acct *acct = io_work_get_acct(wq, work_flags); struct io_cb_cancel_data match = { .fn = io_wq_work_match_item, .data = work, @@ -964,7 +974,7 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) }
raw_spin_lock(&acct->lock); - io_wq_insert_work(wq, work); + io_wq_insert_work(wq, acct, work, work_flags); clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); raw_spin_unlock(&acct->lock);
@@ -1034,10 +1044,10 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data) }
static inline void io_wq_remove_pending(struct io_wq *wq, + struct io_wq_acct *acct, struct io_wq_work *work, struct io_wq_work_node *prev) { - struct io_wq_acct *acct = io_work_get_acct(wq, work); unsigned int hash = io_get_work_hash(work); struct io_wq_work *prev_work = NULL;
@@ -1064,7 +1074,7 @@ static bool io_acct_cancel_pending_work(struct io_wq *wq, work = container_of(node, struct io_wq_work, list); if (!match->fn(work, match->data)) continue; - io_wq_remove_pending(wq, work, prev); + io_wq_remove_pending(wq, acct, work, prev); raw_spin_unlock(&acct->lock); io_run_cancel(work, wq); match->nr_pending++; diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h index b3b004a7b625..d4fb2940e435 100644 --- a/io_uring/io-wq.h +++ b/io_uring/io-wq.h @@ -54,9 +54,14 @@ int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask); int io_wq_max_workers(struct io_wq *wq, int *new_count); bool io_wq_worker_stopped(void);
+static inline bool __io_wq_is_hashed(unsigned int work_flags) +{ + return work_flags & IO_WQ_WORK_HASHED; +} + static inline bool io_wq_is_hashed(struct io_wq_work *work) { - return atomic_read(&work->flags) & IO_WQ_WORK_HASHED; + return __io_wq_is_hashed(atomic_read(&work->flags)); }
typedef bool (work_cancel_fn)(struct io_wq_work *, void *); diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index f7acae5f7e1d..4910ee7ac18a 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -899,7 +899,7 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags) * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires * the submitter task context, IOPOLL protects with uring_lock. */ - if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) { + if (ctx->lockless_cq || (req->flags & REQ_F_REISSUE)) { req->io_task_work.func = io_req_task_complete; io_req_task_work_add(req); return; @@ -3922,6 +3922,7 @@ static int __init io_uring_init(void) SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64); + BUG_ON(!iou_wq);
#ifdef CONFIG_SYSCTL register_sysctl_init("kernel", kernel_io_uring_disabled_table); diff --git a/io_uring/net.c b/io_uring/net.c index 50e8a3ccc9de..16d54cd4d53f 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -76,6 +76,8 @@ struct io_sr_msg { /* initialised and used only by !msg send variants */ u16 buf_group; u16 buf_index; + bool retry; + bool imported; /* only for io_send_zc */ void __user *msg_control; /* used only for send zerocopy */ struct io_kiocb *notif; @@ -187,6 +189,7 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
req->flags &= ~REQ_F_BL_EMPTY; sr->done_io = 0; + sr->retry = false; sr->len = 0; /* get from the provided buffer */ req->buf_index = sr->buf_group; } @@ -404,6 +407,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
sr->done_io = 0; + sr->retry = false;
if (req->opcode != IORING_OP_SEND) { if (sqe->addr2 || sqe->file_index) @@ -786,6 +790,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
sr->done_io = 0; + sr->retry = false;
if (unlikely(sqe->file_index || sqe->addr2)) return -EINVAL; @@ -834,6 +839,9 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_recvmsg_prep_setup(req); }
+/* bits to clear in old and inherit in new cflags on bundle retry */ +#define CQE_F_MASK (IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE) + /* * Finishes io_recv and io_recvmsg. * @@ -853,9 +861,19 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, if (sr->flags & IORING_RECVSEND_BUNDLE) { cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags); + if (sr->retry) + cflags = req->cqe.flags | (cflags & CQE_F_MASK); /* bundle with no more immediate buffers, we're done */ if (req->flags & REQ_F_BL_EMPTY) goto finish; + /* if more is available, retry and append to this one */ + if (!sr->retry && kmsg->msg.msg_inq > 0 && *ret > 0) { + req->cqe.flags = cflags & ~CQE_F_MASK; + sr->len = kmsg->msg.msg_inq; + sr->done_io += *ret; + sr->retry = true; + return false; + } } else { cflags |= io_put_kbuf(req, *ret, issue_flags); } @@ -1234,6 +1252,8 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_kiocb *notif;
zc->done_io = 0; + zc->retry = false; + zc->imported = false; req->flags |= REQ_F_POLL_NO_LAZY;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))) @@ -1396,7 +1416,8 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) (zc->flags & IORING_RECVSEND_POLL_FIRST)) return -EAGAIN;
- if (!zc->done_io) { + if (!zc->imported) { + zc->imported = true; ret = io_send_zc_import(req, issue_flags); if (unlikely(ret)) return ret; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index da729cbbaeb9..a0200fbbace9 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2290,17 +2290,18 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) insn->code = BPF_JMP | BPF_CALL_ARGS; } #endif -#else +#endif + static unsigned int __bpf_prog_ret0_warn(const void *ctx, const struct bpf_insn *insn) { /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON - * is not working properly, so warn about it! + * is not working properly, or interpreter is being used when + * prog->jit_requested is not 0, so warn about it! */ WARN_ON_ONCE(1); return 0; } -#endif
bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp) @@ -2380,8 +2381,18 @@ static void bpf_prog_select_func(struct bpf_prog *fp) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); + u32 idx = (round_up(stack_depth, 32) / 32) - 1;
- fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; + /* may_goto may cause stack size > 512, leading to idx out-of-bounds. + * But for non-JITed programs, we don't need bpf_func, so no bounds + * check needed. + */ + if (!fp->jit_requested && + !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) { + fp->bpf_func = interpreters[idx]; + } else { + fp->bpf_func = __bpf_prog_ret0_warn; + } #else fp->bpf_func = __bpf_prog_ret0_warn; #endif diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 60611df77957..c6f3b5f4ff2b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -21897,6 +21897,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env) if (subprogs[cur_subprog + 1].start == i + delta + 1) { subprogs[cur_subprog].stack_depth += stack_depth_extra; subprogs[cur_subprog].stack_extra = stack_depth_extra; + + stack_depth = subprogs[cur_subprog].stack_depth; + if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) { + verbose(env, "stack size %d(extra %d) is too large\n", + stack_depth, stack_depth_extra); + return -EINVAL; + } cur_subprog++; stack_depth = subprogs[cur_subprog].stack_depth; stack_depth_extra = 0; diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 0f910c828973..1892dc8cd211 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -954,10 +954,12 @@ static void dl_update_tasks_root_domain(struct cpuset *cs) css_task_iter_end(&it); }
-static void dl_rebuild_rd_accounting(void) +void dl_rebuild_rd_accounting(void) { struct cpuset *cs = NULL; struct cgroup_subsys_state *pos_css; + int cpu; + u64 cookie = ++dl_cookie;
lockdep_assert_held(&cpuset_mutex); lockdep_assert_cpus_held(); @@ -965,11 +967,12 @@ static void dl_rebuild_rd_accounting(void)
rcu_read_lock();
- /* - * Clear default root domain DL accounting, it will be computed again - * if a task belongs to it. - */ - dl_clear_root_domain(&def_root_domain); + for_each_possible_cpu(cpu) { + if (dl_bw_visited(cpu, cookie)) + continue; + + dl_clear_root_domain_cpu(cpu); + }
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
@@ -994,10 +997,9 @@ static void partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new) { - mutex_lock(&sched_domains_mutex); + sched_domains_mutex_lock(); partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); - dl_rebuild_rd_accounting(); - mutex_unlock(&sched_domains_mutex); + sched_domains_mutex_unlock(); }
/* @@ -1083,6 +1085,13 @@ void rebuild_sched_domains(void) cpus_read_unlock(); }
+void cpuset_reset_sched_domains(void) +{ + mutex_lock(&cpuset_mutex); + partition_sched_domains(1, NULL, NULL); + mutex_unlock(&cpuset_mutex); +} + /** * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed diff --git a/kernel/cpu.c b/kernel/cpu.c index 07455d25329c..ad755db29efd 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1453,11 +1453,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
out: cpus_write_unlock(); - /* - * Do post unplug cleanup. This is still protected against - * concurrent CPU hotplug via cpu_add_remove_lock. - */ - lockup_detector_cleanup(); arch_smt_update(); return ret; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 823aa0824916..f6cf17929bb9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2407,6 +2407,7 @@ ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event) #define DETACH_GROUP 0x01UL #define DETACH_CHILD 0x02UL #define DETACH_DEAD 0x04UL +#define DETACH_EXIT 0x08UL
/* * Cross CPU call to remove a performance event @@ -2421,6 +2422,7 @@ __perf_remove_from_context(struct perf_event *event, void *info) { struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; + enum perf_event_state state = PERF_EVENT_STATE_OFF; unsigned long flags = (unsigned long)info;
ctx_time_update(cpuctx, ctx); @@ -2429,16 +2431,19 @@ __perf_remove_from_context(struct perf_event *event, * Ensure event_sched_out() switches to OFF, at the very least * this avoids raising perf_pending_task() at this time. */ - if (flags & DETACH_DEAD) + if (flags & DETACH_EXIT) + state = PERF_EVENT_STATE_EXIT; + if (flags & DETACH_DEAD) { event->pending_disable = 1; + state = PERF_EVENT_STATE_DEAD; + } event_sched_out(event, ctx); + perf_event_set_state(event, min(event->state, state)); if (flags & DETACH_GROUP) perf_group_detach(event); if (flags & DETACH_CHILD) perf_child_detach(event); list_del_event(event, ctx); - if (flags & DETACH_DEAD) - event->state = PERF_EVENT_STATE_DEAD;
if (!pmu_ctx->nr_events) { pmu_ctx->rotate_necessary = 0; @@ -3558,7 +3563,8 @@ static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx, } }
-static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in) +static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, + struct task_struct *task, bool sched_in) { struct perf_event_pmu_context *pmu_ctx; struct perf_cpu_pmu_context *cpc; @@ -3567,7 +3573,7 @@ static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task) - pmu_ctx->pmu->sched_task(pmu_ctx, sched_in); + pmu_ctx->pmu->sched_task(pmu_ctx, task, sched_in); } }
@@ -3630,7 +3636,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next) WRITE_ONCE(ctx->task, next); WRITE_ONCE(next_ctx->task, task);
- perf_ctx_sched_task_cb(ctx, false); + perf_ctx_sched_task_cb(ctx, task, false); perf_event_swap_task_ctx_data(ctx, next_ctx);
perf_ctx_enable(ctx, false); @@ -3660,7 +3666,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next) perf_ctx_disable(ctx, false);
inside_switch: - perf_ctx_sched_task_cb(ctx, false); + perf_ctx_sched_task_cb(ctx, task, false); task_ctx_sched_out(ctx, NULL, EVENT_ALL);
perf_ctx_enable(ctx, false); @@ -3702,7 +3708,8 @@ void perf_sched_cb_inc(struct pmu *pmu) * PEBS requires this to provide PID/TID information. This requires we flush * all queued PEBS records before we context switch to a new task. */ -static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in) +static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, + struct task_struct *task, bool sched_in) { struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context); struct pmu *pmu; @@ -3716,7 +3723,7 @@ static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_i perf_ctx_lock(cpuctx, cpuctx->task_ctx); perf_pmu_disable(pmu);
- pmu->sched_task(cpc->task_epc, sched_in); + pmu->sched_task(cpc->task_epc, task, sched_in);
perf_pmu_enable(pmu); perf_ctx_unlock(cpuctx, cpuctx->task_ctx); @@ -3734,7 +3741,7 @@ static void perf_pmu_sched_task(struct task_struct *prev, return;
list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry) - __perf_pmu_sched_task(cpc, sched_in); + __perf_pmu_sched_task(cpc, sched_in ? next : prev, sched_in); }
static void perf_event_switch(struct task_struct *task, @@ -4029,7 +4036,7 @@ static void perf_event_context_sched_in(struct task_struct *task) perf_ctx_lock(cpuctx, ctx); perf_ctx_disable(ctx, false);
- perf_ctx_sched_task_cb(ctx, true); + perf_ctx_sched_task_cb(ctx, task, true);
perf_ctx_enable(ctx, false); perf_ctx_unlock(cpuctx, ctx); @@ -4060,7 +4067,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
perf_event_sched_in(cpuctx, ctx, NULL);
- perf_ctx_sched_task_cb(cpuctx->task_ctx, true); + perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true);
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) perf_ctx_enable(&cpuctx->ctx, false); @@ -13448,12 +13455,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) mutex_lock(&parent_event->child_mutex); }
- perf_remove_from_context(event, detach_flags); - - raw_spin_lock_irq(&ctx->lock); - if (event->state > PERF_EVENT_STATE_EXIT) - perf_event_set_state(event, PERF_EVENT_STATE_EXIT); - raw_spin_unlock_irq(&ctx->lock); + perf_remove_from_context(event, detach_flags | DETACH_EXIT);
/* * Child events can be freed. @@ -14002,6 +14004,7 @@ int perf_event_init_task(struct task_struct *child, u64 clone_flags) child->perf_event_ctxp = NULL; mutex_init(&child->perf_event_mutex); INIT_LIST_HEAD(&child->perf_event_list); + child->perf_ctx_data = NULL;
ret = perf_event_init_context(child, clone_flags); if (ret) { diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 180509132d4b..09459647cb82 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -19,7 +19,7 @@
static void perf_output_wakeup(struct perf_output_handle *handle) { - atomic_set(&handle->rb->poll, EPOLLIN); + atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM);
handle->event->pending_wakeup = 1;
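On the kernel/events/core.c hunks above, pmu->sched_task() now receives the task being switched in or out. A minimal sketch of the updated callback shape for a hypothetical PMU driver (driver and function names invented for illustration):

    #include <linux/perf_event.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    static void example_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
                                       struct task_struct *task, bool sched_in)
    {
            /* 'task' is the incoming task on sched_in, the outgoing one otherwise */
            if (sched_in)
                    pr_debug("example-pmu: sched in pid %d\n", task_pid_nr(task));
            else
                    pr_debug("example-pmu: sched out pid %d\n", task_pid_nr(task));
    }
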
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index b4ca8898fe17..7420a2a0d1f7 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -173,6 +173,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0); int err; struct mmu_notifier_range range; + pte_t pte;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + PAGE_SIZE); @@ -192,6 +193,16 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (!page_vma_mapped_walk(&pvmw)) goto unlock; VM_BUG_ON_PAGE(addr != pvmw.address, old_page); + pte = ptep_get(pvmw.pte); + + /* + * Handle PFN swap PTES, such as device-exclusive ones, that actually + * map pages: simply trigger GUP again to fix it up. + */ + if (unlikely(!pte_present(pte))) { + page_vma_mapped_walk_done(&pvmw); + goto unlock; + }
if (new_page) { folio_get(new_folio); @@ -206,7 +217,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, inc_mm_counter(mm, MM_ANONPAGES); }
- flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); + flush_cache_page(vma, addr, pte_pfn(pte)); ptep_clear_flush(vma, addr, pvmw.pte); if (new_page) set_pte_at(mm, addr, pvmw.pte, @@ -2169,8 +2180,8 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags) */ unsigned long uprobe_get_trampoline_vaddr(void) { + unsigned long trampoline_vaddr = UPROBE_NO_TRAMPOLINE_VADDR; struct xol_area *area; - unsigned long trampoline_vaddr = -1;
/* Pairs with xol_add_vma() smp_store_release() */ area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ diff --git a/kernel/fork.c b/kernel/fork.c index 735405a9c5f3..ca2ca3884f76 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -504,6 +504,10 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) vma_numab_state_init(new); dup_anon_vma_name(orig, new);
+ /* track_pfn_copy() will later take care of copying internal state. */ + if (unlikely(new->vm_flags & VM_PFNMAP)) + untrack_pfn_clear(new); + return new; }
diff --git a/kernel/kexec_elf.c b/kernel/kexec_elf.c index d3689632e8b9..3a5c25b2adc9 100644 --- a/kernel/kexec_elf.c +++ b/kernel/kexec_elf.c @@ -390,7 +390,7 @@ int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, struct kexec_buf *kbuf, unsigned long *lowest_load_addr) { - unsigned long lowest_addr = UINT_MAX; + unsigned long lowest_addr = ULONG_MAX; int ret; size_t i;
diff --git a/kernel/reboot.c b/kernel/reboot.c index b5a8569e5d81..f348f1ba9e22 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -932,48 +932,76 @@ void orderly_reboot(void) } EXPORT_SYMBOL_GPL(orderly_reboot);
+static const char *hw_protection_action_str(enum hw_protection_action action) +{ + switch (action) { + case HWPROT_ACT_SHUTDOWN: + return "shutdown"; + case HWPROT_ACT_REBOOT: + return "reboot"; + default: + return "undefined"; + } +} + +static enum hw_protection_action hw_failure_emergency_action; + /** - * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay - * @work: work_struct associated with the emergency poweroff function + * hw_failure_emergency_action_func - emergency action work after a known delay + * @work: work_struct associated with the emergency action function * * This function is called in very critical situations to force - * a kernel poweroff after a configurable timeout value. + * a kernel poweroff or reboot after a configurable timeout value. */ -static void hw_failure_emergency_poweroff_func(struct work_struct *work) +static void hw_failure_emergency_action_func(struct work_struct *work) { + const char *action_str = hw_protection_action_str(hw_failure_emergency_action); + + pr_emerg("Hardware protection timed-out. Trying forced %s\n", + action_str); + /* - * We have reached here after the emergency shutdown waiting period has - * expired. This means orderly_poweroff has not been able to shut off - * the system for some reason. + * We have reached here after the emergency action waiting period has + * expired. This means orderly_poweroff/reboot has not been able to + * shut off the system for some reason. * - * Try to shut down the system immediately using kernel_power_off - * if populated + * Try to shut off the system immediately if possible */ - pr_emerg("Hardware protection timed-out. Trying forced poweroff\n"); - kernel_power_off(); + + if (hw_failure_emergency_action == HWPROT_ACT_REBOOT) + kernel_restart(NULL); + else + kernel_power_off();
/* * Worst of the worst case trigger emergency restart */ - pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n"); + pr_emerg("Hardware protection %s failed. Trying emergency restart\n", + action_str); emergency_restart(); }
-static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work, - hw_failure_emergency_poweroff_func); +static DECLARE_DELAYED_WORK(hw_failure_emergency_action_work, + hw_failure_emergency_action_func);
/** - * hw_failure_emergency_poweroff - Trigger an emergency system poweroff + * hw_failure_emergency_schedule - Schedule an emergency system shutdown or reboot + * + * @action: The hardware protection action to be taken + * @action_delay_ms: Time in milliseconds to elapse before triggering action * * This may be called from any critical situation to trigger a system shutdown - * after a given period of time. If time is negative this is not scheduled. + * or reboot after a given period of time. + * If time is negative this is not scheduled. */ -static void hw_failure_emergency_poweroff(int poweroff_delay_ms) +static void hw_failure_emergency_schedule(enum hw_protection_action action, + int action_delay_ms) { - if (poweroff_delay_ms <= 0) + if (action_delay_ms <= 0) return; - schedule_delayed_work(&hw_failure_emergency_poweroff_work, - msecs_to_jiffies(poweroff_delay_ms)); + hw_failure_emergency_action = action; + schedule_delayed_work(&hw_failure_emergency_action_work, + msecs_to_jiffies(action_delay_ms)); }
/** @@ -983,10 +1011,7 @@ static void hw_failure_emergency_poweroff(int poweroff_delay_ms) * @ms_until_forced: Time to wait for orderly shutdown or reboot before * triggering it. Negative value disables the forced * shutdown or reboot. - * @shutdown: If true, indicates that a shutdown will happen - * after the critical tempeature is reached. - * If false, indicates that a reboot will happen - * after the critical tempeature is reached. + * @action: The hardware protection action to be taken. * * Initiate an emergency system shutdown or reboot in order to protect * hardware from further damage. Usage examples include a thermal protection. @@ -994,7 +1019,8 @@ static void hw_failure_emergency_poweroff(int poweroff_delay_ms) * pending even if the previous request has given a large timeout for forced * shutdown/reboot. */ -void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown) +void __hw_protection_shutdown(const char *reason, int ms_until_forced, + enum hw_protection_action action) { static atomic_t allow_proceed = ATOMIC_INIT(1);
@@ -1008,11 +1034,11 @@ void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shut * Queue a backup emergency shutdown in the event of * orderly_poweroff failure */ - hw_failure_emergency_poweroff(ms_until_forced); - if (shutdown) - orderly_poweroff(true); - else + hw_failure_emergency_schedule(action, ms_until_forced); + if (action == HWPROT_ACT_REBOOT) orderly_reboot(); + else + orderly_poweroff(true); } EXPORT_SYMBOL_GPL(__hw_protection_shutdown);
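The kernel/reboot.c rework above replaces the shutdown-only boolean with an enum hw_protection_action. A minimal sketch of a caller, assuming the prototype and HWPROT_ACT_* values shown in the patch (the trip handler itself is hypothetical):

    #include <linux/reboot.h>

    static void example_critical_trip_handler(void)
    {
            /* orderly reboot now, forced reboot if it has not completed in 5 s */
            __hw_protection_shutdown("critical temperature reached",
                                     5000, HWPROT_ACT_REBOOT);
    }
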
diff --git a/kernel/rseq.c b/kernel/rseq.c index 2cb16091ec0a..a7d81229eda0 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -78,24 +78,24 @@ static int rseq_validate_ro_fields(struct task_struct *t) return -EFAULT; }
-static void rseq_set_ro_fields(struct task_struct *t, u32 cpu_id_start, u32 cpu_id, - u32 node_id, u32 mm_cid) -{ - rseq_kernel_fields(t)->cpu_id_start = cpu_id; - rseq_kernel_fields(t)->cpu_id = cpu_id; - rseq_kernel_fields(t)->node_id = node_id; - rseq_kernel_fields(t)->mm_cid = mm_cid; -} +/* + * Update an rseq field and its in-kernel copy in lock-step to keep a coherent + * state. + */ +#define rseq_unsafe_put_user(t, value, field, error_label) \ + do { \ + unsafe_put_user(value, &t->rseq->field, error_label); \ + rseq_kernel_fields(t)->field = value; \ + } while (0) + #else static int rseq_validate_ro_fields(struct task_struct *t) { return 0; }
-static void rseq_set_ro_fields(struct task_struct *t, u32 cpu_id_start, u32 cpu_id, - u32 node_id, u32 mm_cid) -{ -} +#define rseq_unsafe_put_user(t, value, field, error_label) \ + unsafe_put_user(value, &t->rseq->field, error_label) #endif
/* @@ -173,17 +173,18 @@ static int rseq_update_cpu_node_id(struct task_struct *t) WARN_ON_ONCE((int) mm_cid < 0); if (!user_write_access_begin(rseq, t->rseq_len)) goto efault; - unsafe_put_user(cpu_id, &rseq->cpu_id_start, efault_end); - unsafe_put_user(cpu_id, &rseq->cpu_id, efault_end); - unsafe_put_user(node_id, &rseq->node_id, efault_end); - unsafe_put_user(mm_cid, &rseq->mm_cid, efault_end); + + rseq_unsafe_put_user(t, cpu_id, cpu_id_start, efault_end); + rseq_unsafe_put_user(t, cpu_id, cpu_id, efault_end); + rseq_unsafe_put_user(t, node_id, node_id, efault_end); + rseq_unsafe_put_user(t, mm_cid, mm_cid, efault_end); + /* * Additional feature fields added after ORIG_RSEQ_SIZE * need to be conditionally updated only if * t->rseq_len != ORIG_RSEQ_SIZE. */ user_write_access_end(); - rseq_set_ro_fields(t, cpu_id, cpu_id, node_id, mm_cid); trace_rseq_update(t); return 0;
@@ -195,6 +196,7 @@ static int rseq_update_cpu_node_id(struct task_struct *t)
static int rseq_reset_rseq_cpu_node_id(struct task_struct *t) { + struct rseq __user *rseq = t->rseq; u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED, node_id = 0, mm_cid = 0;
@@ -202,38 +204,36 @@ static int rseq_reset_rseq_cpu_node_id(struct task_struct *t) * Validate read-only rseq fields. */ if (rseq_validate_ro_fields(t)) - return -EFAULT; - /* - * Reset cpu_id_start to its initial state (0). - */ - if (put_user(cpu_id_start, &t->rseq->cpu_id_start)) - return -EFAULT; - /* - * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming - * in after unregistration can figure out that rseq needs to be - * registered again. - */ - if (put_user(cpu_id, &t->rseq->cpu_id)) - return -EFAULT; - /* - * Reset node_id to its initial state (0). - */ - if (put_user(node_id, &t->rseq->node_id)) - return -EFAULT; + goto efault; + + if (!user_write_access_begin(rseq, t->rseq_len)) + goto efault; + /* - * Reset mm_cid to its initial state (0). + * Reset all fields to their initial state. + * + * All fields have an initial state of 0 except cpu_id which is set to + * RSEQ_CPU_ID_UNINITIALIZED, so that any user coming in after + * unregistration can figure out that rseq needs to be registered + * again. */ - if (put_user(mm_cid, &t->rseq->mm_cid)) - return -EFAULT; - - rseq_set_ro_fields(t, cpu_id_start, cpu_id, node_id, mm_cid); + rseq_unsafe_put_user(t, cpu_id_start, cpu_id_start, efault_end); + rseq_unsafe_put_user(t, cpu_id, cpu_id, efault_end); + rseq_unsafe_put_user(t, node_id, node_id, efault_end); + rseq_unsafe_put_user(t, mm_cid, mm_cid, efault_end);
/* * Additional feature fields added after ORIG_RSEQ_SIZE * need to be conditionally reset only if * t->rseq_len != ORIG_RSEQ_SIZE. */ + user_write_access_end(); return 0; + +efault_end: + user_write_access_end(); +efault: + return -EFAULT; }
static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 042351c7afce..3c7c942c7c42 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8183,7 +8183,7 @@ static void cpuset_cpu_active(void) * operation in the resume sequence, just build a single sched * domain, ignoring cpusets. */ - partition_sched_domains(1, NULL, NULL); + cpuset_reset_sched_domains(); if (--num_cpus_frozen) return; /* @@ -8202,7 +8202,7 @@ static void cpuset_cpu_inactive(unsigned int cpu) cpuset_update_active_cpus(); } else { num_cpus_frozen++; - partition_sched_domains(1, NULL, NULL); + cpuset_reset_sched_domains(); } }
@@ -8424,9 +8424,9 @@ void __init sched_init_smp(void) * CPU masks are stable and all blatant races in the below code cannot * happen. */ - mutex_lock(&sched_domains_mutex); + sched_domains_mutex_lock(); sched_init_domains(cpu_active_mask); - mutex_unlock(&sched_domains_mutex); + sched_domains_mutex_unlock();
/* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index ff4df16b5186..5dca336cdd7c 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -166,14 +166,14 @@ static inline unsigned long dl_bw_capacity(int i) } }
-static inline bool dl_bw_visited(int cpu, u64 gen) +bool dl_bw_visited(int cpu, u64 cookie) { struct root_domain *rd = cpu_rq(cpu)->rd;
- if (rd->visit_gen == gen) + if (rd->visit_cookie == cookie) return true;
- rd->visit_gen = gen; + rd->visit_cookie = cookie; return false; }
@@ -207,7 +207,7 @@ static inline unsigned long dl_bw_capacity(int i) return SCHED_CAPACITY_SCALE; }
-static inline bool dl_bw_visited(int cpu, u64 gen) +bool dl_bw_visited(int cpu, u64 cookie) { return false; } @@ -2956,7 +2956,7 @@ void dl_add_task_root_domain(struct task_struct *p) struct dl_bw *dl_b;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags); - if (!dl_task(p)) { + if (!dl_task(p) || dl_entity_is_special(&p->dl)) { raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); return; } @@ -2981,18 +2981,22 @@ void dl_clear_root_domain(struct root_domain *rd) rd->dl_bw.total_bw = 0;
/* - * dl_server bandwidth is only restored when CPUs are attached to root - * domains (after domains are created or CPUs moved back to the - * default root doamin). + * dl_servers are not tasks. Since dl_add_task_root_domain ignores + * them, we need to account for them here explicitly. */ for_each_cpu(i, rd->span) { struct sched_dl_entity *dl_se = &cpu_rq(i)->fair_server;
if (dl_server(dl_se) && cpu_active(i)) - rd->dl_bw.total_bw += dl_se->dl_bw; + __dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(i)); } }
+void dl_clear_root_domain_cpu(int cpu) +{ + dl_clear_root_domain(cpu_rq(cpu)->rd); +} + #endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p) @@ -3171,15 +3175,18 @@ DEFINE_SCHED_CLASS(dl) = { #endif };
-/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */ -static u64 dl_generation; +/* + * Used for dl_bw check and update, used under sched_rt_handler()::mutex and + * sched_domains_mutex. + */ +u64 dl_cookie;
int sched_dl_global_validate(void) { u64 runtime = global_rt_runtime(); u64 period = global_rt_period(); u64 new_bw = to_ratio(period, runtime); - u64 gen = ++dl_generation; + u64 cookie = ++dl_cookie; struct dl_bw *dl_b; int cpu, cpus, ret = 0; unsigned long flags; @@ -3192,7 +3199,7 @@ int sched_dl_global_validate(void) for_each_online_cpu(cpu) { rcu_read_lock_sched();
- if (dl_bw_visited(cpu, gen)) + if (dl_bw_visited(cpu, cookie)) goto next;
dl_b = dl_bw_of(cpu); @@ -3229,7 +3236,7 @@ static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq) void sched_dl_do_global(void) { u64 new_bw = -1; - u64 gen = ++dl_generation; + u64 cookie = ++dl_cookie; struct dl_bw *dl_b; int cpu; unsigned long flags; @@ -3240,7 +3247,7 @@ void sched_dl_do_global(void) for_each_possible_cpu(cpu) { rcu_read_lock_sched();
- if (dl_bw_visited(cpu, gen)) { + if (dl_bw_visited(cpu, cookie)) { rcu_read_unlock_sched(); continue; } diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index ef047add7f9e..a0893a483d35 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -292,7 +292,7 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf, bool orig;
cpus_read_lock(); - mutex_lock(&sched_domains_mutex); + sched_domains_mutex_lock();
orig = sched_debug_verbose; result = debugfs_write_file_bool(filp, ubuf, cnt, ppos); @@ -304,7 +304,7 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf, sd_dentry = NULL; }
- mutex_unlock(&sched_domains_mutex); + sched_domains_mutex_unlock(); cpus_read_unlock();
return result; @@ -515,9 +515,9 @@ static __init int sched_init_debug(void) debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost); debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
- mutex_lock(&sched_domains_mutex); + sched_domains_mutex_lock(); update_sched_domain_debugfs(); - mutex_unlock(&sched_domains_mutex); + sched_domains_mutex_unlock(); #endif
#ifdef CONFIG_NUMA_BALANCING diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c798d2795243..89c7260103e1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -883,6 +883,26 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) return __node_2_se(left); }
+/* + * HACK, stash a copy of deadline at the point of pick in vlag, + * which isn't used until dequeue. + */ +static inline void set_protect_slice(struct sched_entity *se) +{ + se->vlag = se->deadline; +} + +static inline bool protect_slice(struct sched_entity *se) +{ + return se->vlag == se->deadline; +} + +static inline void cancel_protect_slice(struct sched_entity *se) +{ + if (protect_slice(se)) + se->vlag = se->deadline + 1; +} + /* * Earliest Eligible Virtual Deadline First * @@ -919,11 +939,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq) if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr))) curr = NULL;
- /* - * Once selected, run a task until it either becomes non-eligible or - * until it gets a new slice. See the HACK in set_next_entity(). - */ - if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline) + if (sched_feat(RUN_TO_PARITY) && curr && protect_slice(curr)) return curr;
/* Pick the leftmost entity if it's eligible */ @@ -5530,11 +5546,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end_fair(cfs_rq, se); __dequeue_entity(cfs_rq, se); update_load_avg(cfs_rq, se, UPDATE_TG); - /* - * HACK, stash a copy of deadline at the point of pick in vlag, - * which isn't used until dequeue. - */ - se->vlag = se->deadline; + + set_protect_slice(se); }
update_stats_curr_start(cfs_rq, se); @@ -6991,6 +7004,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) update_cfs_group(se);
se->slice = slice; + if (se != cfs_rq->curr) + min_vruntime_cb_propagate(&se->run_node, NULL); slice = cfs_rq_min_slice(cfs_rq);
cfs_rq->h_nr_runnable += h_nr_runnable; @@ -7120,6 +7135,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) update_cfs_group(se);
se->slice = slice; + if (se != cfs_rq->curr) + min_vruntime_cb_propagate(&se->run_node, NULL); slice = cfs_rq_min_slice(cfs_rq);
cfs_rq->h_nr_runnable -= h_nr_runnable; @@ -8783,8 +8800,15 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int * Preempt an idle entity in favor of a non-idle entity (and don't preempt * in the inverse case). */ - if (cse_is_idle && !pse_is_idle) + if (cse_is_idle && !pse_is_idle) { + /* + * When non-idle entity preempt an idle entity, + * don't give idle entity slice protection. + */ + cancel_protect_slice(se); goto preempt; + } + if (cse_is_idle != pse_is_idle) return;
@@ -8803,8 +8827,8 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int * Note that even if @p does not turn out to be the most eligible * task at this moment, current's slice protection will be lost. */ - if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline) - se->vlag = se->deadline + 1; + if (do_preempt_short(cfs_rq, pse, se)) + cancel_protect_slice(se);
/* * If @p has become the most eligible task, force preemption. diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 4b8e33c615b1..8cebe71d2bb1 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2910,6 +2910,7 @@ static int sched_rt_handler(const struct ctl_table *table, int write, void *buff int ret;
mutex_lock(&mutex); + sched_domains_mutex_lock(); old_period = sysctl_sched_rt_period; old_runtime = sysctl_sched_rt_runtime;
@@ -2936,6 +2937,7 @@ static int sched_rt_handler(const struct ctl_table *table, int write, void *buff sysctl_sched_rt_period = old_period; sysctl_sched_rt_runtime = old_runtime; } + sched_domains_mutex_unlock(); mutex_unlock(&mutex);
return ret; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 023b844159c9..1aa65a0ac586 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -998,7 +998,7 @@ struct root_domain { * Also, some corner cases, like 'wrap around' is dangerous, but given * that u64 is 'big enough'. So that shouldn't be a concern. */ - u64 visit_gen; + u64 visit_cookie;
#ifdef HAVE_RT_PUSH_IPI /* diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index c49aea8c1025..363ad268a25b 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -6,6 +6,14 @@ #include <linux/bsearch.h>
DEFINE_MUTEX(sched_domains_mutex); +void sched_domains_mutex_lock(void) +{ + mutex_lock(&sched_domains_mutex); +} +void sched_domains_mutex_unlock(void) +{ + mutex_unlock(&sched_domains_mutex); +}
/* Protected by sched_domains_mutex: */ static cpumask_var_t sched_domains_tmpmask; @@ -560,7 +568,7 @@ static int init_rootdomain(struct root_domain *rd) rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func); #endif
- rd->visit_gen = 0; + rd->visit_cookie = 0; init_dl_bw(&rd->dl_bw); if (cpudl_init(&rd->cpudl) != 0) goto free_rto_mask; @@ -2783,6 +2791,7 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], ndoms_cur = ndoms_new;
update_sched_domain_debugfs(); + dl_rebuild_rd_accounting(); }
/* @@ -2791,7 +2800,7 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new) { - mutex_lock(&sched_domains_mutex); + sched_domains_mutex_lock(); partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); - mutex_unlock(&sched_domains_mutex); + sched_domains_mutex_unlock(); } diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 7bbb408431eb..3231f63d93d8 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -29,13 +29,11 @@ #include <linux/syscalls.h> #include <linux/sysctl.h>
+#include <asm/syscall.h> + /* Not exposed in headers: strictly internal use only. */ #define SECCOMP_MODE_DEAD (SECCOMP_MODE_FILTER + 1)
-#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER -#include <asm/syscall.h> -#endif - #ifdef CONFIG_SECCOMP_FILTER #include <linux/file.h> #include <linux/filter.h> @@ -1074,6 +1072,14 @@ void secure_computing_strict(int this_syscall) else BUG(); } +int __secure_computing(const struct seccomp_data *sd) +{ + int this_syscall = sd ? sd->nr : + syscall_get_nr(current, current_pt_regs()); + + secure_computing_strict(this_syscall); + return 0; +} #else
#ifdef CONFIG_SECCOMP_FILTER diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index adc947587eb8..a612f6f182e5 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -843,7 +843,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struc if (unlikely(is_global_init(task))) return -EPERM;
- if (!preemptible()) { + if (preempt_count() != 0 || irqs_disabled()) { /* Do an early check on signal validity. Otherwise, * the error is lost in deferred irq_work. */ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index bb6089c2951e..510409f97992 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -7411,9 +7411,9 @@ static __init int rb_write_something(struct rb_test_data *data, bool nested) /* Ignore dropped events before test starts. */ if (started) { if (nested) - data->bytes_dropped += len; - else data->bytes_dropped_nested += len; + else + data->bytes_dropped += len; } return len; } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 513de9ceb80e..b1f6d04f9fe9 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -470,6 +470,7 @@ static void test_event_printk(struct trace_event_call *call) case '%': continue; case 'p': + do_pointer: /* Find dereferencing fields */ switch (fmt[i + 1]) { case 'B': case 'R': case 'r': @@ -498,6 +499,12 @@ static void test_event_printk(struct trace_event_call *call) continue; if (fmt[i + j] == '*') { star = true; + /* Handle %*pbl case */ + if (!j && fmt[i + 1] == 'p') { + arg++; + i++; + goto do_pointer; + } continue; } if ((fmt[i + j] == 's')) { diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index e3f7d09e5512..0330ecdfb9f1 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -305,7 +305,7 @@ static const char *synth_field_fmt(char *type) else if (strcmp(type, "gfp_t") == 0) fmt = "%x"; else if (synth_field_is_string(type)) - fmt = "%.*s"; + fmt = "%s"; else if (synth_field_is_stack(type)) fmt = "%s";
@@ -852,6 +852,38 @@ static struct trace_event_fields synth_event_fields_array[] = { {} };
+static int synth_event_reg(struct trace_event_call *call, + enum trace_reg type, void *data) +{ + struct synth_event *event = container_of(call, struct synth_event, call); + + switch (type) { +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: +#endif + case TRACE_REG_REGISTER: + if (!try_module_get(event->mod)) + return -EBUSY; + break; + default: + break; + } + + int ret = trace_event_reg(call, type, data); + + switch (type) { +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_UNREGISTER: +#endif + case TRACE_REG_UNREGISTER: + module_put(event->mod); + break; + default: + break; + } + return ret; +} + static int register_synth_event(struct synth_event *event) { struct trace_event_call *call = &event->call; @@ -881,7 +913,7 @@ static int register_synth_event(struct synth_event *event) goto out; } call->flags = TRACE_EVENT_FL_TRACEPOINT; - call->class->reg = trace_event_reg; + call->class->reg = synth_event_reg; call->class->probe = trace_event_raw_event_synth; call->data = event; call->tp = event->tp; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 136c750b0b4d..b3ee425bf2d7 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -1511,6 +1511,7 @@ void graph_trace_close(struct trace_iterator *iter) if (data) { free_percpu(data->cpu_data); kfree(data); + iter->private = NULL; } }
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 7294ad676379..f05b719d0716 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -250,8 +250,6 @@ static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - else - iter->private = NULL; }
static void irqsoff_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index f3a2722ee4c0..c83a51218ee5 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -2032,7 +2032,6 @@ static int start_kthread(unsigned int cpu)
if (IS_ERR(kthread)) { pr_err(BANNER "could not start sampling thread\n"); - stop_per_cpu_kthreads(); return -ENOMEM; }
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index af30586f1aea..e24ddcc23481 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -188,8 +188,6 @@ static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - else - iter->private = NULL; }
static void wakeup_trace_close(struct trace_iterator *iter) diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 5267adeaa403..41e4e8070923 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -269,6 +269,15 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) if (ret < 0) goto error;
+ /* + * pipe_resize_ring() does not update nr_accounted for watch_queue + * pipes, because the above vastly overprovisions. Set nr_accounted on + * and max_usage this pipe to the number that was actually charged to + * the user above via account_pipe_buffers. + */ + pipe->max_usage = nr_pages; + pipe->nr_accounted = nr_pages; + ret = -ENOMEM; pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index b2da7de39d06..18156023e461 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -347,8 +347,6 @@ static int __init watchdog_thresh_setup(char *str) } __setup("watchdog_thresh=", watchdog_thresh_setup);
-static void __lockup_detector_cleanup(void); - #ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM enum stats_per_group { STATS_SYSTEM, @@ -886,11 +884,6 @@ static void __lockup_detector_reconfigure(void)
watchdog_hardlockup_start(); cpus_read_unlock(); - /* - * Must be called outside the cpus locked section to prevent - * recursive locking in the perf code. - */ - __lockup_detector_cleanup(); }
void lockup_detector_reconfigure(void) @@ -940,24 +933,6 @@ static inline void lockup_detector_setup(void) } #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
-static void __lockup_detector_cleanup(void) -{ - lockdep_assert_held(&watchdog_mutex); - hardlockup_detector_perf_cleanup(); -} - -/** - * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes - * - * Caller must not hold the cpu hotplug rwsem. - */ -void lockup_detector_cleanup(void) -{ - mutex_lock(&watchdog_mutex); - __lockup_detector_cleanup(); - mutex_unlock(&watchdog_mutex); -} - /** * lockup_detector_soft_poweroff - Interface to stop lockup detector(s) * diff --git a/kernel/watchdog_perf.c b/kernel/watchdog_perf.c index 59c1d86a73a2..2fdb96eaf493 100644 --- a/kernel/watchdog_perf.c +++ b/kernel/watchdog_perf.c @@ -21,8 +21,6 @@ #include <linux/perf_event.h>
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); -static DEFINE_PER_CPU(struct perf_event *, dead_event); -static struct cpumask dead_events_mask;
static atomic_t watchdog_cpus = ATOMIC_INIT(0);
@@ -181,36 +179,12 @@ void watchdog_hardlockup_disable(unsigned int cpu)
if (event) { perf_event_disable(event); + perf_event_release_kernel(event); this_cpu_write(watchdog_ev, NULL); - this_cpu_write(dead_event, event); - cpumask_set_cpu(smp_processor_id(), &dead_events_mask); atomic_dec(&watchdog_cpus); } }
-/** - * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them - * - * Called from lockup_detector_cleanup(). Serialized by the caller. - */ -void hardlockup_detector_perf_cleanup(void) -{ - int cpu; - - for_each_cpu(cpu, &dead_events_mask) { - struct perf_event *event = per_cpu(dead_event, cpu); - - /* - * Required because for_each_cpu() reports unconditionally - * CPU0 as set on UP kernels. Sigh. - */ - if (event) - perf_event_release_kernel(event); - per_cpu(dead_event, cpu) = NULL; - } - cpumask_clear(&dead_events_mask); -} - /** * hardlockup_detector_perf_stop - Globally stop watchdog events * diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c index c02baa4168e1..055356508d97 100644 --- a/lib/842/842_compress.c +++ b/lib/842/842_compress.c @@ -532,6 +532,8 @@ int sw842_compress(const u8 *in, unsigned int ilen, } if (repeat_count) { ret = add_repeat_template(p, repeat_count); + if (ret) + return ret; repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c index 135322592faf..63aa78e6f5c1 100644 --- a/lib/stackinit_kunit.c +++ b/lib/stackinit_kunit.c @@ -184,6 +184,15 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size, #define INIT_UNION_assigned_copy(var_type) \ INIT_STRUCT_assigned_copy(var_type)
+/* + * The "did we actually fill the stack?" check value needs + * to be neither 0 nor any of the "pattern" bytes. The + * pattern bytes are compiler, architecture, and type based, + * so we have to pick a value that never appears for those + * combinations. Use 0x99 which is not 0xFF, 0xFE, nor 0xAA. + */ +#define FILL_BYTE 0x99 + /* * @name: unique string name for the test * @var_type: type to be tested for zeroing initialization @@ -206,12 +215,12 @@ static noinline void test_ ## name (struct kunit *test) \ ZERO_CLONE_ ## which(zero); \ /* Clear entire check buffer for 0xFF overlap test. */ \ memset(check_buf, 0x00, sizeof(check_buf)); \ - /* Fill stack with 0xFF. */ \ + /* Fill stack with FILL_BYTE. */ \ ignored = leaf_ ##name((unsigned long)&ignored, 1, \ FETCH_ARG_ ## which(zero)); \ - /* Verify all bytes overwritten with 0xFF. */ \ + /* Verify all bytes overwritten with FILL_BYTE. */ \ for (sum = 0, i = 0; i < target_size; i++) \ - sum += (check_buf[i] != 0xFF); \ + sum += (check_buf[i] != FILL_BYTE); \ /* Clear entire check buffer for later bit tests. */ \ memset(check_buf, 0x00, sizeof(check_buf)); \ /* Extract stack-defined variable contents. */ \ @@ -222,7 +231,8 @@ static noinline void test_ ## name (struct kunit *test) \ * possible between the two leaf function calls. \ */ \ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \ - "leaf fill was not 0xFF!?\n"); \ + "leaf fill was not 0x%02X!?\n", \ + FILL_BYTE); \ \ /* Validate that compiler lined up fill and target. */ \ KUNIT_ASSERT_TRUE_MSG(test, \ @@ -234,9 +244,9 @@ static noinline void test_ ## name (struct kunit *test) \ (int)((ssize_t)(uintptr_t)fill_start - \ (ssize_t)(uintptr_t)target_start)); \ \ - /* Look for any bytes still 0xFF in check region. */ \ + /* Validate check region has no FILL_BYTE bytes. */ \ for (sum = 0, i = 0; i < target_size; i++) \ - sum += (check_buf[i] == 0xFF); \ + sum += (check_buf[i] == FILL_BYTE); \ \ if (sum != 0 && xfail) \ kunit_skip(test, \ @@ -271,12 +281,12 @@ static noinline int leaf_ ## name(unsigned long sp, bool fill, \ * stack frame of SOME kind... \ */ \ memset(buf, (char)(sp & 0xff), sizeof(buf)); \ - /* Fill variable with 0xFF. */ \ + /* Fill variable with FILL_BYTE. */ \ if (fill) { \ fill_start = &var; \ fill_size = sizeof(var); \ memset(fill_start, \ - (char)((sp & 0xff) | forced_mask), \ + FILL_BYTE & forced_mask, \ fill_size); \ } \ \ @@ -469,7 +479,7 @@ static int noinline __leaf_switch_none(int path, bool fill) fill_start = &var; fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0x55, fill_size); + memset(fill_start, (forced_mask | 0x55) & FILL_BYTE, fill_size); } memcpy(check_buf, target_start, target_size); break; @@ -480,7 +490,7 @@ static int noinline __leaf_switch_none(int path, bool fill) fill_start = &var; fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0xaa, fill_size); + memset(fill_start, (forced_mask | 0xaa) & FILL_BYTE, fill_size); } memcpy(check_buf, target_start, target_size); break; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 56fe96319292..a8ac4c4fffcf 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -2285,7 +2285,7 @@ int __init no_hash_pointers_enable(char *str) early_param("no_hash_pointers", no_hash_pointers_enable);
/* Used for Rust formatting ('%pA'). */ -char *rust_fmt_argument(char *buf, char *end, void *ptr); +char *rust_fmt_argument(char *buf, char *end, const void *ptr);
/* * Show a '%p' thing. A kernel extension is that the '%p' is followed diff --git a/mm/gup.c b/mm/gup.c index 3883b307780e..61e751baf862 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1283,6 +1283,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) return -EOPNOTSUPP;
+ if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma)) + return -EOPNOTSUPP; + if (vma_is_secretmem(vma)) return -EFAULT;
diff --git a/mm/memory.c b/mm/memory.c index fb7b8dc75167..53f7b0aaf2a3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1362,12 +1362,12 @@ int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { pgd_t *src_pgd, *dst_pgd; - unsigned long next; unsigned long addr = src_vma->vm_start; unsigned long end = src_vma->vm_end; struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; struct mmu_notifier_range range; + unsigned long next, pfn; bool is_cow; int ret;
@@ -1378,11 +1378,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
if (unlikely(src_vma->vm_flags & VM_PFNMAP)) { - /* - * We do not free on error cases below as remove_vma - * gets called on error from higher level routine - */ - ret = track_pfn_copy(src_vma); + ret = track_pfn_copy(dst_vma, src_vma, &pfn); if (ret) return ret; } @@ -1419,7 +1415,6 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) continue; if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd, addr, next))) { - untrack_pfn_clear(dst_vma); ret = -ENOMEM; break; } @@ -1429,6 +1424,8 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) raw_write_seqcount_end(&src_mm->write_protect_seq); mmu_notifier_invalidate_range_end(&range); } + if (ret && unlikely(src_vma->vm_flags & VM_PFNMAP)) + untrack_pfn_copy(dst_vma, pfn); return ret; }
@@ -6834,10 +6831,8 @@ void __might_fault(const char *file, int line) if (pagefault_disabled()) return; __might_sleep(file, line); -#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) if (current->mm) might_lock_read(¤t->mm->mmap_lock); -#endif } EXPORT_SYMBOL(__might_fault); #endif diff --git a/mm/page-writeback.c b/mm/page-writeback.c index eb55ece39c56..3147119a9a04 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -120,29 +120,6 @@ EXPORT_SYMBOL(laptop_mode);
struct wb_domain global_wb_domain;
-/* consolidated parameters for balance_dirty_pages() and its subroutines */ -struct dirty_throttle_control { -#ifdef CONFIG_CGROUP_WRITEBACK - struct wb_domain *dom; - struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */ -#endif - struct bdi_writeback *wb; - struct fprop_local_percpu *wb_completions; - - unsigned long avail; /* dirtyable */ - unsigned long dirty; /* file_dirty + write + nfs */ - unsigned long thresh; /* dirty threshold */ - unsigned long bg_thresh; /* dirty background threshold */ - - unsigned long wb_dirty; /* per-wb counterparts */ - unsigned long wb_thresh; - unsigned long wb_bg_thresh; - - unsigned long pos_ratio; - bool freerun; - bool dirty_exceeded; -}; - /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. The longer the period, the slower fractions will @@ -1095,7 +1072,7 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc) struct bdi_writeback *wb = dtc->wb; unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); - unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); + unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); unsigned long wb_thresh = dtc->wb_thresh; unsigned long x_intercept; unsigned long setpoint; /* dirty pages' target balance point */ @@ -1962,11 +1939,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb, */ if (pause < min_pause) { trace_balance_dirty_pages(wb, - sdtc->thresh, - sdtc->bg_thresh, - sdtc->dirty, - sdtc->wb_thresh, - sdtc->wb_dirty, + sdtc, dirty_ratelimit, task_ratelimit, pages_dirtied, @@ -1991,11 +1964,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
pause: trace_balance_dirty_pages(wb, - sdtc->thresh, - sdtc->bg_thresh, - sdtc->dirty, - sdtc->wb_thresh, - sdtc->wb_dirty, + sdtc, dirty_ratelimit, task_ratelimit, pages_dirtied, diff --git a/mm/zswap.c b/mm/zswap.c index 23365e76a3ce..c7ff9e94520a 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -881,18 +881,32 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node) { struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); + struct acomp_req *req; + struct crypto_acomp *acomp; + u8 *buffer; + + if (IS_ERR_OR_NULL(acomp_ctx)) + return 0;
mutex_lock(&acomp_ctx->mutex); - if (!IS_ERR_OR_NULL(acomp_ctx)) { - if (!IS_ERR_OR_NULL(acomp_ctx->req)) - acomp_request_free(acomp_ctx->req); - acomp_ctx->req = NULL; - if (!IS_ERR_OR_NULL(acomp_ctx->acomp)) - crypto_free_acomp(acomp_ctx->acomp); - kfree(acomp_ctx->buffer); - } + req = acomp_ctx->req; + acomp = acomp_ctx->acomp; + buffer = acomp_ctx->buffer; + acomp_ctx->req = NULL; + acomp_ctx->acomp = NULL; + acomp_ctx->buffer = NULL; mutex_unlock(&acomp_ctx->mutex);
+ /* + * Do the actual freeing after releasing the mutex to avoid subtle + * locking dependencies causing deadlocks. + */ + if (!IS_ERR_OR_NULL(req)) + acomp_request_free(req); + if (!IS_ERR_OR_NULL(acomp)) + crypto_free_acomp(acomp); + kfree(buffer); + return 0; }
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 9f3b8b682adb..3ee7dba34310 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1270,28 +1270,18 @@ static int __must_check ax25_connect(struct socket *sock, } }
- /* - * Must bind first - autobinding in this may or may not work. If - * the socket is already bound, check to see if the device has - * been filled in, error if it hasn't. - */ + /* Must bind first - autobinding does not work. */ if (sock_flag(sk, SOCK_ZAPPED)) { - /* check if we can remove this feature. It is broken. */ - printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact jreuter@yaina.de\n", - current->comm); - if ((err = ax25_rt_autobind(ax25, &fsa->fsa_ax25.sax25_call)) < 0) { - kfree(digi); - goto out_release; - } + kfree(digi); + err = -EINVAL; + goto out_release; + }
- ax25_fillin_cb(ax25, ax25->ax25_dev); - ax25_cb_add(ax25); - } else { - if (ax25->ax25_dev == NULL) { - kfree(digi); - err = -EHOSTUNREACH; - goto out_release; - } + /* Check to see if the device has been filled in, error if it hasn't. */ + if (ax25->ax25_dev == NULL) { + kfree(digi); + err = -EHOSTUNREACH; + goto out_release; }
if (sk->sk_type == SOCK_SEQPACKET && diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index 69de75db0c9c..10577434f40b 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -373,80 +373,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) return ax25_rt; }
-/* - * Adjust path: If you specify a default route and want to connect - * a target on the digipeater path but w/o having a special route - * set before, the path has to be truncated from your target on. - */ -static inline void ax25_adjust_path(ax25_address *addr, ax25_digi *digipeat) -{ - int k; - - for (k = 0; k < digipeat->ndigi; k++) { - if (ax25cmp(addr, &digipeat->calls[k]) == 0) - break; - } - - digipeat->ndigi = k; -} - - -/* - * Find which interface to use. - */ -int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) -{ - ax25_uid_assoc *user; - ax25_route *ax25_rt; - int err = 0; - - ax25_route_lock_use(); - ax25_rt = ax25_get_route(addr, NULL); - if (!ax25_rt) { - ax25_route_lock_unuse(); - return -EHOSTUNREACH; - } - rcu_read_lock(); - if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { - err = -EHOSTUNREACH; - goto put; - } - - user = ax25_findbyuid(current_euid()); - if (user) { - ax25->source_addr = user->call; - ax25_uid_put(user); - } else { - if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) { - err = -EPERM; - goto put; - } - ax25->source_addr = *(ax25_address *)ax25->ax25_dev->dev->dev_addr; - } - - if (ax25_rt->digipeat != NULL) { - ax25->digipeat = kmemdup(ax25_rt->digipeat, sizeof(ax25_digi), - GFP_ATOMIC); - if (ax25->digipeat == NULL) { - err = -ENOMEM; - goto put; - } - ax25_adjust_path(addr, ax25->digipeat); - } - - if (ax25->sk != NULL) { - local_bh_disable(); - bh_lock_sock(ax25->sk); - sock_reset_flag(ax25->sk, SOCK_ZAPPED); - bh_unlock_sock(ax25->sk); - local_bh_enable(); - } - -put: - rcu_read_unlock(); - ax25_route_lock_unuse(); - return err; -}
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, ax25_address *dest, ax25_digi *digi) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 012fc107901a..94d9147612da 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3552,42 +3552,27 @@ static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) }
/* Schedule SCO */ -static void hci_sched_sco(struct hci_dev *hdev) +static void hci_sched_sco(struct hci_dev *hdev, __u8 type) { struct hci_conn *conn; struct sk_buff *skb; - int quote; + int quote, *cnt; + unsigned int pkts = hdev->sco_pkts;
- BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, "type %u", type);
- if (!hci_conn_num(hdev, SCO_LINK)) + if (!hci_conn_num(hdev, type) || !pkts) return;
- while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { - while (quote-- && (skb = skb_dequeue(&conn->data_q))) { - BT_DBG("skb %p len %d", skb, skb->len); - hci_send_frame(hdev, skb); - - conn->sent++; - if (conn->sent == ~0) - conn->sent = 0; - } - } -} - -static void hci_sched_esco(struct hci_dev *hdev) -{ - struct hci_conn *conn; - struct sk_buff *skb; - int quote; - - BT_DBG("%s", hdev->name); - - if (!hci_conn_num(hdev, ESCO_LINK)) - return; + /* Use sco_pkts if flow control has not been enabled which will limit + * the amount of buffer sent in a row. + */ + if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + cnt = &pkts; + else + cnt = &hdev->sco_cnt;
- while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, - &quote))) { + while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); @@ -3595,8 +3580,17 @@ static void hci_sched_esco(struct hci_dev *hdev) conn->sent++; if (conn->sent == ~0) conn->sent = 0; + (*cnt)--; } } + + /* Rescheduled if all packets were sent and flow control is not enabled + * as there could be more packets queued that could not be sent and + * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule + * needs to be forced. + */ + if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + queue_work(hdev->workqueue, &hdev->tx_work); }
static void hci_sched_acl_pkt(struct hci_dev *hdev) @@ -3632,8 +3626,8 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) chan->conn->sent++;
/* Send pending SCO packets right away */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); } }
@@ -3688,8 +3682,8 @@ static void hci_sched_le(struct hci_dev *hdev) chan->conn->sent++;
/* Send pending SCO packets right away */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); } }
@@ -3734,8 +3728,8 @@ static void hci_tx_work(struct work_struct *work)
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); hci_sched_iso(hdev); hci_sched_acl(hdev); hci_sched_le(hdev); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 903b0b52692a..e2bfbcee06a8 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -151,7 +151,7 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_status *rp = data; + struct hci_rp_remote_name_req_cancel *rp = data;
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
@@ -4012,8 +4012,8 @@ static const struct hci_cc { HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), - HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, - hci_cc_remote_name_req_cancel), + HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel, + sizeof(struct hci_rp_remote_name_req_cancel)), HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, sizeof(struct hci_rp_role_discovery)), HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, @@ -4442,9 +4442,11 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, break;
case SCO_LINK: + case ESCO_LINK: hdev->sco_cnt += count; if (hdev->sco_cnt > hdev->sco_pkts) hdev->sco_cnt = hdev->sco_pkts; + break;
case ISO_LINK: @@ -6051,8 +6053,17 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * a LE Direct Advertising Report event. In that case it is * important to see if the address is matching the local * controller address. + * + * If local privacy is not enable the controller shall not be + * generating such event since according to its documentation it is only + * valid for filter_policy 0x02 and 0x03, but the fact that it did + * generate LE Direct Advertising Report means it is probably broken and + * won't generate any other event which can potentially break + * auto-connect logic so in case local privacy is not enable this + * ignores the direct_addr so it works as a regular report. */ - if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) { + if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr && + hci_dev_test_flag(hdev, HCI_PRIVACY)) { direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, &bdaddr_resolved);
@@ -6062,12 +6073,6 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) return;
- /* If the controller is not using resolvable random - * addresses, then this report can be ignored. - */ - if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) - return; - /* If the local IRK of the controller does not match * with the resolvable random address provided, then * this report can be ignored. diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index dd770ef5ec36..14c3ee5c6a1e 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -3696,6 +3696,9 @@ static int hci_read_local_name_sync(struct hci_dev *hdev) /* Read Voice Setting */ static int hci_read_voice_setting_sync(struct hci_dev *hdev) { + if (!read_voice_setting_capable(hdev)) + return 0; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL, HCI_CMD_TIMEOUT); } @@ -3766,6 +3769,28 @@ static int hci_write_ca_timeout_sync(struct hci_dev *hdev) sizeof(param), &param, HCI_CMD_TIMEOUT); }
+/* Enable SCO flow control if supported */ +static int hci_write_sync_flowctl_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_sync_flowctl cp; + int err; + + /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */ + if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) || + !test_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.enable = 0x01; + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (!err) + hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL); + + return err; +} + /* BR Controller init stage 2 command sequence */ static const struct hci_init_stage br_init2[] = { /* HCI_OP_READ_BUFFER_SIZE */ @@ -3784,6 +3809,8 @@ static const struct hci_init_stage br_init2[] = { HCI_INIT(hci_clear_event_filter_sync), /* HCI_OP_WRITE_CA_TIMEOUT */ HCI_INIT(hci_write_ca_timeout_sync), + /* HCI_OP_WRITE_SYNC_FLOWCTL */ + HCI_INIT(hci_write_sync_flowctl_sync), {} };
@@ -4129,7 +4156,8 @@ static int hci_read_page_scan_type_sync(struct hci_dev *hdev) * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. */ - if (!(hdev->commands[13] & 0x01)) + if (!(hdev->commands[13] & 0x01) || + test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks)) return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index f213ed108361..6bc0a11f2ed3 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -394,10 +394,26 @@ static int old_deviceless(struct net *net, void __user *data) return -EOPNOTSUPP; }
-int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg) +int br_ioctl_stub(struct net *net, unsigned int cmd, void __user *uarg) { int ret = -EOPNOTSUPP; + struct ifreq ifr; + + if (cmd == SIOCBRADDIF || cmd == SIOCBRDELIF) { + void __user *data; + char *colon; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + if (get_user_ifreq(&ifr, &data, uarg)) + return -EFAULT; + + ifr.ifr_name[IFNAMSIZ - 1] = 0; + colon = strchr(ifr.ifr_name, ':'); + if (colon) + *colon = 0; + }
rtnl_lock();
@@ -430,7 +446,21 @@ int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, break; case SIOCBRADDIF: case SIOCBRDELIF: - ret = add_del_if(br, ifr->ifr_ifindex, cmd == SIOCBRADDIF); + { + struct net_device *dev; + + dev = __dev_get_by_name(net, ifr.ifr_name); + if (!dev || !netif_device_present(dev)) { + ret = -ENODEV; + break; + } + if (!netif_is_bridge_master(dev)) { + ret = -EOPNOTSUPP; + break; + } + + ret = add_del_if(netdev_priv(dev), ifr.ifr_ifindex, cmd == SIOCBRADDIF); + } break; }
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 1054b8a88edc..d5b3c5936a79 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -949,8 +949,7 @@ br_port_get_check_rtnl(const struct net_device *dev) /* br_ioctl.c */ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd); -int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg); +int br_ioctl_stub(struct net *net, unsigned int cmd, void __user *uarg);
/* br_multicast.c */ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 4c2098ac9d72..57f79f8e8466 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -551,7 +551,6 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, int err; struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); const struct net_device_ops *ops; - netdevice_tracker dev_tracker;
if (!dev) return -ENODEV; @@ -614,22 +613,6 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, case SIOCWANDEV: return dev_siocwandev(dev, &ifr->ifr_settings);
- case SIOCBRADDIF: - case SIOCBRDELIF: - if (!netif_device_present(dev)) - return -ENODEV; - if (!netif_is_bridge_master(dev)) - return -EOPNOTSUPP; - - netdev_hold(dev, &dev_tracker, GFP_KERNEL); - rtnl_net_unlock(net); - - err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL); - - netdev_put(dev, &dev_tracker); - rtnl_net_lock(net); - return err; - case SIOCDEVPRIVATE ... SIOCDEVPRIVATE + 15: return dev_siocdevprivate(dev, ifr, data, cmd);
@@ -812,8 +795,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: - case SIOCBRADDIF: - case SIOCBRDELIF: case SIOCSHWTSTAMP: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; diff --git a/net/core/dst.c b/net/core/dst.c index 9552a90d4772..6d76b799ce64 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -165,6 +165,14 @@ static void dst_count_dec(struct dst_entry *dst) void dst_release(struct dst_entry *dst) { if (dst && rcuref_put(&dst->__rcuref)) { +#ifdef CONFIG_DST_CACHE + if (dst->flags & DST_METADATA) { + struct metadata_dst *md_dst = (struct metadata_dst *)dst; + + if (md_dst->type == METADATA_IP_TUNNEL) + dst_cache_reset_now(&md_dst->u.tun_info.dst_cache); + } +#endif dst_count_dec(dst); call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu); } diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c index 715f85c6b62e..7832abc5ca6e 100644 --- a/net/core/netdev-genl.c +++ b/net/core/netdev-genl.c @@ -52,6 +52,8 @@ XDP_METADATA_KFUNC_xxx xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP; if (netdev->xsk_tx_metadata_ops->tmo_request_checksum) xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM; + if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time) + xsk_features |= NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO; }
if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) || diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d1e559fce918..80e006940f51 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1171,6 +1171,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, /* IFLA_VF_STATS_TX_DROPPED */ nla_total_size_64bit(sizeof(__u64))); } + if (dev->netdev_ops->ndo_get_vf_guid) + size += num_vfs * 2 * + nla_total_size(sizeof(struct ifla_vf_guid)); return size; } else return 0; diff --git a/net/core/rtnl_net_debug.c b/net/core/rtnl_net_debug.c index 7ecd28cc1c22..f3272b09c255 100644 --- a/net/core/rtnl_net_debug.c +++ b/net/core/rtnl_net_debug.c @@ -102,7 +102,7 @@ static int __init rtnl_net_debug_init(void) { int ret;
- ret = register_pernet_device(&rtnl_net_debug_net_ops); + ret = register_pernet_subsys(&rtnl_net_debug_net_ops); if (ret) return ret;
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index a3676155be78..f65d2f727381 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -416,7 +416,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
skb_dst_update_pmtu_no_confirm(skb, mtu);
- if (!reply || skb->pkt_type == PACKET_HOST) + if (!reply) return 0;
if (skb->protocol == htons(ETH_P_IP)) @@ -451,7 +451,7 @@ static const struct nla_policy geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = { [LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, - [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, + [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 }, };
static const struct nla_policy diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index a9bb9ce5438e..3fe85ecec236 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1626,12 +1626,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb) }
/* fully reclaim rmem/fwd memory allocated for skb */ -static void udp_rmem_release(struct sock *sk, int size, int partial, - bool rx_queue_lock_held) +static void udp_rmem_release(struct sock *sk, unsigned int size, + int partial, bool rx_queue_lock_held) { struct udp_sock *up = udp_sk(sk); struct sk_buff_head *sk_queue; - int amt; + unsigned int amt;
if (likely(partial)) { up->forward_deficit += size; @@ -1651,10 +1651,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial, if (!rx_queue_lock_held) spin_lock(&sk_queue->lock);
- - sk_forward_alloc_add(sk, size); - amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); - sk_forward_alloc_add(sk, -amt); + amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); + sk_forward_alloc_add(sk, size - amt);
if (amt) __sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT); @@ -1726,17 +1724,25 @@ static int udp_rmem_schedule(struct sock *sk, int size) int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) { struct sk_buff_head *list = &sk->sk_receive_queue; - int rmem, err = -ENOMEM; + unsigned int rmem, rcvbuf; spinlock_t *busy = NULL; - int size, rcvbuf; + int size, err = -ENOMEM;
- /* Immediately drop when the receive queue is full. - * Always allow at least one packet. - */ rmem = atomic_read(&sk->sk_rmem_alloc); rcvbuf = READ_ONCE(sk->sk_rcvbuf); - if (rmem > rcvbuf) - goto drop; + size = skb->truesize; + + /* Immediately drop when the receive queue is full. + * Cast to unsigned int performs the boundary check for INT_MAX. + */ + if (rmem + size > rcvbuf) { + if (rcvbuf > INT_MAX >> 1) + goto drop; + + /* Always allow at least one packet for small buffer. */ + if (rmem > rcvbuf) + goto drop; + }
/* Under mem pressure, it might be helpful to help udp_recvmsg() * having linear skbs : @@ -1746,10 +1752,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) */ if (rmem > (rcvbuf >> 1)) { skb_condense(skb); - + size = skb->truesize; busy = busylock_acquire(sk); } - size = skb->truesize; + udp_set_dev_scratch(skb);
atomic_add(size, &sk->sk_rmem_alloc); @@ -1836,7 +1842,7 @@ EXPORT_SYMBOL_GPL(skb_consume_udp);
static struct sk_buff *__first_packet_length(struct sock *sk, struct sk_buff_head *rcvq, - int *total) + unsigned int *total) { struct sk_buff *skb;
@@ -1869,8 +1875,8 @@ static int first_packet_length(struct sock *sk) { struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue; struct sk_buff_head *sk_queue = &sk->sk_receive_queue; + unsigned int total = 0; struct sk_buff *skb; - int total = 0; int res;
spin_lock_bh(&rcvq->lock); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ac8cc1076536..54a8ea004da2 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5784,6 +5784,27 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, } }
+static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb, + struct inet6_dev *idev) +{ + struct nlattr *nla; + + nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); + if (!nla) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); + + nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); + if (!nla) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, u32 ext_filter_mask) { @@ -5806,18 +5827,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
/* XXX - MC not implemented */
- if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS) - return 0; - - nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); - if (!nla) - goto nla_put_failure; - snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); - - nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); - if (!nla) - goto nla_put_failure; - snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); + if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) { + if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0) + goto nla_put_failure; + }
nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr)); if (!nla) diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index dbcea9fee626..62618a058b8f 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -1072,8 +1072,13 @@ static int calipso_sock_getattr(struct sock *sk, struct ipv6_opt_hdr *hop; int opt_len, len, ret_val = -ENOMSG, offset; unsigned char *opt; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return -EAFNOSUPPORT;
+ txopts = txopt_get(pinfo); if (!txopts || !txopts->hopopt) goto done;
@@ -1125,8 +1130,13 @@ static int calipso_sock_setattr(struct sock *sk, { int ret_val; struct ipv6_opt_hdr *old, *new; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return -EAFNOSUPPORT;
+ txopts = txopt_get(pinfo); old = NULL; if (txopts) old = txopts->hopopt; @@ -1153,8 +1163,13 @@ static int calipso_sock_setattr(struct sock *sk, static void calipso_sock_delattr(struct sock *sk) { struct ipv6_opt_hdr *new_hop; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return;
+ txopts = txopt_get(pinfo); if (!txopts || !txopts->hopopt) goto done;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 15ce21afc8c6..169a7b9bc40e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -412,12 +412,37 @@ static bool rt6_check_expired(const struct rt6_info *rt) return false; }
+static struct fib6_info * +rt6_multipath_first_sibling_rcu(const struct fib6_info *rt) +{ + struct fib6_info *iter; + struct fib6_node *fn; + + fn = rcu_dereference(rt->fib6_node); + if (!fn) + goto out; + iter = rcu_dereference(fn->leaf); + if (!iter) + goto out; + + while (iter) { + if (iter->fib6_metric == rt->fib6_metric && + rt6_qualify_for_ecmp(iter)) + return iter; + iter = rcu_dereference(iter->fib6_next); + } + +out: + return NULL; +} + void fib6_select_path(const struct net *net, struct fib6_result *res, struct flowi6 *fl6, int oif, bool have_oif_match, const struct sk_buff *skb, int strict) { - struct fib6_info *match = res->f6i; + struct fib6_info *first, *match = res->f6i; struct fib6_info *sibling; + int hash;
if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) goto out; @@ -440,16 +465,25 @@ void fib6_select_path(const struct net *net, struct fib6_result *res, return; }
- if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound)) + first = rt6_multipath_first_sibling_rcu(match); + if (!first) goto out;
- list_for_each_entry_rcu(sibling, &match->fib6_siblings, + hash = fl6->mp_hash; + if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound) && + rt6_score_route(first->fib6_nh, first->fib6_flags, oif, + strict) >= 0) { + match = first; + goto out; + } + + list_for_each_entry_rcu(sibling, &first->fib6_siblings, fib6_siblings) { const struct fib6_nh *nh = sibling->fib6_nh; int nh_upper_bound;
nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound); - if (fl6->mp_hash > nh_upper_bound) + if (hash > nh_upper_bound) continue; if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0) break; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9351c64608a9..b766472703b1 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1908,12 +1908,12 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, }
if (params->supported_rates && - params->supported_rates_len) { - ieee80211_parse_bitrates(link->conf->chanreq.oper.width, - sband, params->supported_rates, - params->supported_rates_len, - &link_sta->pub->supp_rates[sband->band]); - } + params->supported_rates_len && + !ieee80211_parse_bitrates(link->conf->chanreq.oper.width, + sband, params->supported_rates, + params->supported_rates_len, + &link_sta->pub->supp_rates[sband->band])) + return -EINVAL;
if (params->ht_capa) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 36a9be9a66c8..da2c2e6035be 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -9946,8 +9946,8 @@ ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata, size += 2 + sizeof(struct ieee80211_mle_per_sta_profile) + ETH_ALEN;
- /* SSID element + WMM */ - size += 2 + sdata->vif.cfg.ssid_len + 9; + /* WMM */ + size += 9; size += ieee80211_link_common_elems_size(sdata, iftype, cbss, elems_len); } @@ -10053,11 +10053,6 @@ ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata,
capab_pos = skb_put(skb, 2);
- skb_put_u8(skb, WLAN_EID_SSID); - skb_put_u8(skb, sdata->vif.cfg.ssid_len); - skb_put_data(skb, sdata->vif.cfg.ssid, - sdata->vif.cfg.ssid_len); - extra_used = ieee80211_add_link_elems(sdata, skb, &capab, NULL, add_links_data->link[link_id].elems, diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c2df81b7e950..a133e1c175ce 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2839,11 +2839,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, err = nft_netdev_register_hooks(ctx->net, &hook.list); if (err < 0) goto err_hooks; + + unregister = true; } }
- unregister = true; - if (nla[NFTA_CHAIN_COUNTERS]) { if (!nft_is_base_chain(chain)) { err = -EOPNOTSUPP; diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 75598520b0fa..6557a4018c09 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -21,25 +21,22 @@ #include <net/netfilter/nf_log.h> #include <net/netfilter/nft_meta.h>
-#if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_X86) - +#ifdef CONFIG_MITIGATION_RETPOLINE static struct static_key_false nf_tables_skip_direct_calls;
-static bool nf_skip_indirect_calls(void) +static inline bool nf_skip_indirect_calls(void) { return static_branch_likely(&nf_tables_skip_direct_calls); }
-static void __init nf_skip_indirect_calls_enable(void) +static inline void __init nf_skip_indirect_calls_enable(void) { if (!cpu_feature_enabled(X86_FEATURE_RETPOLINE)) static_branch_enable(&nf_tables_skip_direct_calls); } #else -static inline bool nf_skip_indirect_calls(void) { return false; } - static inline void nf_skip_indirect_calls_enable(void) { } -#endif +#endif /* CONFIG_MITIGATION_RETPOLINE */
static noinline void __nft_trace_packet(const struct nft_pktinfo *pkt, const struct nft_verdict *verdict, diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 5c913987901a..8b7b39d8a109 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -567,7 +567,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, enum ip_conntrack_info ctinfo = 0; const struct nfnl_ct_hook *nfnl_ct; bool csum_verify; - struct lsm_context ctx; + struct lsm_context ctx = { NULL, 0, 0 }; int seclen = 0; ktime_t tstamp;
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 8bfac4185ac7..abb0c8ec6371 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -309,7 +309,8 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
nft_setelem_expr_foreach(expr, elem_expr, size) { if (expr->ops->gc && - expr->ops->gc(read_pnet(&set->net), expr)) + expr->ops->gc(read_pnet(&set->net), expr) && + set->flags & NFT_SET_EVAL) return true; }
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 681301b46aa4..0c63d1367cf7 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -335,13 +335,13 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr, static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = { [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 }, [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 }, - [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, + [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 }, };
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr, struct nft_tunnel_opts *opts) { - struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len; + struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len); struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1]; int err, data_len;
@@ -625,7 +625,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb, if (!inner) goto failure; while (opts->len > offset) { - opt = (struct geneve_opt *)opts->u.data + offset; + opt = (struct geneve_opt *)(opts->u.data + offset); if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS, opt->opt_class) || nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE, diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 704c858cf209..61fea7baae5d 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -947,12 +947,6 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, pskb_trim(skb, ovs_mac_header_len(key)); }
- /* Need to set the pkt_type to involve the routing layer. The - * packet movement through the OVS datapath doesn't generally - * use routing, but this is needed for tunnel cases. - */ - skb->pkt_type = PACKET_OUTGOING; - if (likely(!mru || (skb->len <= mru + vport->dev->hard_header_len))) { ovs_vport_send(vport, skb, ovs_key_mac_proto(key)); diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index af7c99845948..e296714803dc 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -68,7 +68,7 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, - .len = 128 }, + .len = 127 }, };
static const struct nla_policy diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 03505673d523..099ff6a3e1f5 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -766,7 +766,7 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, - .len = 128 }, + .len = 127 }, };
static const struct nla_policy diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 20ff7386b74b..f485f62ab721 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -123,8 +123,6 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* Check to update highest and lowest priorities. */ if (skb_queue_empty(lp_qdisc)) { if (q->lowest_prio == q->highest_prio) { - /* The incoming packet is the only packet in queue. */ - BUG_ON(sch->q.qlen != 1); q->lowest_prio = prio; q->highest_prio = prio; } else { @@ -156,7 +154,6 @@ static struct sk_buff *skbprio_dequeue(struct Qdisc *sch) /* Update highest priority field. */ if (skb_queue_empty(hpq)) { if (q->lowest_prio == q->highest_prio) { - BUG_ON(sch->q.qlen); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; } else { diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 8e1e97be4df7..ee3eac338a9d 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -525,6 +525,8 @@ static int proc_sctp_do_auth(const struct ctl_table *ctl, int write, return ret; }
+static DEFINE_MUTEX(sctp_sysctl_mutex); + static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { @@ -549,6 +551,7 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, if (new_value > max || new_value < min) return -EINVAL;
+ mutex_lock(&sctp_sysctl_mutex); net->sctp.udp_port = new_value; sctp_udp_sock_stop(net); if (new_value) { @@ -561,6 +564,7 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, lock_sock(sk); sctp_sk(sk)->udp_port = htons(net->sctp.udp_port); release_sock(sk); + mutex_unlock(&sctp_sysctl_mutex); }
return ret; diff --git a/net/socket.c b/net/socket.c index 28bae5a94234..38227d00d198 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1145,12 +1145,10 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) */
static DEFINE_MUTEX(br_ioctl_mutex); -static int (*br_ioctl_hook)(struct net *net, struct net_bridge *br, - unsigned int cmd, struct ifreq *ifr, +static int (*br_ioctl_hook)(struct net *net, unsigned int cmd, void __user *uarg);
-void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, - unsigned int cmd, struct ifreq *ifr, +void brioctl_set(int (*hook)(struct net *net, unsigned int cmd, void __user *uarg)) { mutex_lock(&br_ioctl_mutex); @@ -1159,8 +1157,7 @@ void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, } EXPORT_SYMBOL(brioctl_set);
-int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg) +int br_ioctl_call(struct net *net, unsigned int cmd, void __user *uarg) { int err = -ENOPKG;
@@ -1169,7 +1166,7 @@ int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
mutex_lock(&br_ioctl_mutex); if (br_ioctl_hook) - err = br_ioctl_hook(net, br, cmd, ifr, uarg); + err = br_ioctl_hook(net, cmd, uarg); mutex_unlock(&br_ioctl_mutex);
return err; @@ -1269,7 +1266,9 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) case SIOCSIFBR: case SIOCBRADDBR: case SIOCBRDELBR: - err = br_ioctl_call(net, NULL, cmd, NULL, argp); + case SIOCBRADDIF: + case SIOCBRDELIF: + err = br_ioctl_call(net, cmd, argp); break; case SIOCGIFVLAN: case SIOCSIFVLAN: @@ -3429,6 +3428,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: + case SIOCBRADDIF: + case SIOCBRDELIF: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCGSKNS: @@ -3468,8 +3469,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: - case SIOCBRADDIF: - case SIOCBRDELIF: case SIOCGIFNAME: case SIOCSIFNAME: case SIOCGMIIPHY: diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 7e3db87ae433..fc6afbc8d680 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1551,7 +1551,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr, timeout = vsk->connect_timeout; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) { + /* If the socket is already closing or it is in an error state, there + * is no point in waiting. + */ + while (sk->sk_state != TCP_ESTABLISHED && + sk->sk_state != TCP_CLOSING && sk->sk_err == 0) { if (flags & O_NONBLOCK) { /* If we're not going to block, we schedule a timeout * function to generate a timeout on the connection diff --git a/net/wireless/core.c b/net/wireless/core.c index 828e29872633..ceb768925b85 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -546,6 +546,9 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, INIT_WORK(&rdev->mgmt_registrations_update_wk, cfg80211_mgmt_registrations_update_wk); spin_lock_init(&rdev->mgmt_registrations_lock); + INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); + INIT_LIST_HEAD(&rdev->wiphy_work_list); + spin_lock_init(&rdev->wiphy_work_lock);
#ifdef CONFIG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -563,9 +566,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, return NULL; }
- INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); - INIT_LIST_HEAD(&rdev->wiphy_work_list); - spin_lock_init(&rdev->wiphy_work_lock); INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index aac0e7298dc7..b457fe78672b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -10172,7 +10172,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: - wdev->links[0].ap.chandef = chandef; + wdev->links[link_id].ap.chandef = chandef; break; case NL80211_IFTYPE_ADHOC: wdev->u.ibss.chandef = chandef; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 89d2bef96469..a373a7130d75 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -742,6 +742,9 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs, goto free_err; } } + + if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME) + skb->skb_mstamp_ns = meta->request.launch_time; } }
@@ -802,8 +805,11 @@ static int __xsk_generic_xmit(struct sock *sk) * if there is space in it. This avoids having to implement * any buffering in the Tx path. */ - if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr)) + err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr); + if (err) { + err = -EAGAIN; goto out; + }
skb = xsk_build_skb(xs, &desc); if (IS_ERR(skb)) { diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index d1fa94e52cea..97c8030cc417 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -244,11 +244,6 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, xfrm_address_t *daddr; bool is_packet_offload;
- if (!x->type_offload) { - NL_SET_ERR_MSG(extack, "Type doesn't support offload"); - return -EINVAL; - } - if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) { NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request"); @@ -310,6 +305,13 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, return -EINVAL; }
+ xfrm_set_type_offload(x); + if (!x->type_offload) { + NL_SET_ERR_MSG(extack, "Type doesn't support offload"); + dev_put(dev); + return -EINVAL; + } + xso->dev = dev; netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC); xso->real_dev = dev; @@ -332,6 +334,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, netdev_put(dev, &xso->dev_tracker); xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+ xfrm_unset_type_offload(x); /* User explicitly requested packet offload mode and configured * policy in addition to the XFRM state. So be civil to users, * and return an error instead of taking fallback path. diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index ad2202fa82f3..69af5964c886 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -424,18 +424,18 @@ void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, } EXPORT_SYMBOL(xfrm_unregister_type_offload);
-static const struct xfrm_type_offload * -xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load) +void xfrm_set_type_offload(struct xfrm_state *x) { const struct xfrm_type_offload *type = NULL; struct xfrm_state_afinfo *afinfo; + bool try_load = true;
retry: - afinfo = xfrm_state_get_afinfo(family); + afinfo = xfrm_state_get_afinfo(x->props.family); if (unlikely(afinfo == NULL)) - return NULL; + goto out;
- switch (proto) { + switch (x->id.proto) { case IPPROTO_ESP: type = afinfo->type_offload_esp; break; @@ -449,18 +449,16 @@ xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load) rcu_read_unlock();
if (!type && try_load) { - request_module("xfrm-offload-%d-%d", family, proto); + request_module("xfrm-offload-%d-%d", x->props.family, + x->id.proto); try_load = false; goto retry; }
- return type; -} - -static void xfrm_put_type_offload(const struct xfrm_type_offload *type) -{ - module_put(type->owner); +out: + x->type_offload = type; } +EXPORT_SYMBOL(xfrm_set_type_offload);
static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = { [XFRM_MODE_BEET] = { @@ -609,8 +607,6 @@ static void ___xfrm_state_destroy(struct xfrm_state *x) kfree(x->coaddr); kfree(x->replay_esn); kfree(x->preplay_esn); - if (x->type_offload) - xfrm_put_type_offload(x->type_offload); if (x->type) { x->type->destructor(x); xfrm_put_type(x->type); @@ -784,6 +780,8 @@ void xfrm_dev_state_free(struct xfrm_state *x) struct xfrm_dev_offload *xso = &x->xso; struct net_device *dev = READ_ONCE(xso->dev);
+ xfrm_unset_type_offload(x); + if (dev && dev->xfrmdev_ops) { spin_lock_bh(&xfrm_state_dev_gc_lock); if (!hlist_unhashed(&x->dev_gclist)) @@ -3122,7 +3120,7 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) } EXPORT_SYMBOL_GPL(xfrm_state_mtu);
-int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, +int __xfrm_init_state(struct xfrm_state *x, bool init_replay, struct netlink_ext_ack *extack) { const struct xfrm_mode *inner_mode; @@ -3178,8 +3176,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, goto error; }
- x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload); - err = x->type->init_state(x, extack); if (err) goto error; @@ -3229,7 +3225,7 @@ int xfrm_init_state(struct xfrm_state *x) { int err;
- err = __xfrm_init_state(x, true, false, NULL); + err = __xfrm_init_state(x, true, NULL); if (!err) x->km.state = XFRM_STATE_VALID;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 08c6d6f0179f..82a768500999 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -907,7 +907,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, goto error; }
- err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack); + err = __xfrm_init_state(x, false, extack); if (err) goto error;
diff --git a/rust/Makefile b/rust/Makefile index ea3849eb78f6..2c57c624fe7d 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -232,7 +232,8 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ -mfunction-return=thunk-extern -mrecord-mcount -mabi=lp64 \ -mindirect-branch-cs-prefix -mstack-protector-guard% -mtraceback=no \ -mno-pointers-to-nested-functions -mno-string \ - -mno-strict-align -mstrict-align \ + -mno-strict-align -mstrict-align -mdirect-extern-access \ + -mexplicit-relocs -mno-check-zero-division \ -fconserve-stack -falign-jumps=% -falign-loops=% \ -femit-struct-debug-baseonly -fno-ipa-cp-clone -fno-ipa-sra \ -fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \ @@ -246,6 +247,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ # Derived from `scripts/Makefile.clang`. BINDGEN_TARGET_x86 := x86_64-linux-gnu BINDGEN_TARGET_arm64 := aarch64-linux-gnu +BINDGEN_TARGET_loongarch := loongarch64-linux-gnusf BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
# All warnings are inhibited since GCC builds are very experimental, diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs index b19ee490be58..61ee36c5e5f5 100644 --- a/rust/kernel/print.rs +++ b/rust/kernel/print.rs @@ -6,12 +6,11 @@ //! //! Reference: https://docs.kernel.org/core-api/printk-basics.html
-use core::{ +use crate::{ ffi::{c_char, c_void}, - fmt, + str::RawFormatter, }; - -use crate::str::RawFormatter; +use core::fmt;
// Called from `vsprintf` with format specifier `%pA`. #[expect(clippy::missing_safety_doc)] diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index dd9944a97b7e..5b632635e00d 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -307,7 +307,7 @@ $(obj)/$(TRACE_HELPERS): TPROGS_CFLAGS := $(TPROGS_CFLAGS) -D__must_check=
VMLINUX_BTF_PATHS ?= $(abspath $(if $(O),$(O)/vmlinux)) \ $(abspath $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)) \ - $(abspath ./vmlinux) + $(abspath $(objtree)/vmlinux) VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
$(obj)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h index 999f78d380ae..1a05fc153353 100644 --- a/samples/trace_events/trace-events-sample.h +++ b/samples/trace_events/trace-events-sample.h @@ -319,7 +319,8 @@ TRACE_EVENT(foo_bar, __assign_cpumask(cpum, cpumask_bits(mask)); ),
- TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s", __entry->foo, __entry->bar, + TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s [%d] %*pbl", + __entry->foo, __entry->bar,
/* * Notice here the use of some helper functions. This includes: @@ -370,7 +371,10 @@ TRACE_EVENT(foo_bar,
__get_str(str), __get_str(lstr), __get_bitmask(cpus), __get_cpumask(cpum), - __get_str(vstr)) + __get_str(vstr), + __get_dynamic_array_len(cpus), + __get_dynamic_array_len(cpus), + __get_dynamic_array(cpus)) );
/* diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py index f6c1b063775a..15d76f7d8ebc 100644 --- a/scripts/gdb/linux/symbols.py +++ b/scripts/gdb/linux/symbols.py @@ -15,6 +15,7 @@ import gdb import os import re
+from itertools import count from linux import modules, utils, constants
@@ -95,10 +96,14 @@ lx-symbols command.""" except gdb.error: return str(module_addr)
- attrs = sect_attrs['attrs'] - section_name_to_address = { - attrs[n]['battr']['attr']['name'].string(): attrs[n]['address'] - for n in range(int(sect_attrs['nsections']))} + section_name_to_address = {} + for i in count(): + # this is a NULL terminated array + if sect_attrs['grp']['bin_attrs'][i] == 0x0: + break + + attr = sect_attrs['grp']['bin_attrs'][i].dereference() + section_name_to_address[attr['attr']['name'].string()] = attr['private']
textaddr = section_name_to_address.get(".text", module_addr) args = [] diff --git a/scripts/package/debian/rules b/scripts/package/debian/rules index ca07243bd5cd..2b3f9a0bd6c4 100755 --- a/scripts/package/debian/rules +++ b/scripts/package/debian/rules @@ -21,9 +21,11 @@ ifeq ($(origin KBUILD_VERBOSE),undefined) endif endif
-revision = $(lastword $(subst -, ,$(shell dpkg-parsechangelog -S Version))) +revision = $(shell dpkg-parsechangelog -S Version | sed -n 's/.*-//p') CROSS_COMPILE ?= $(filter-out $(DEB_BUILD_GNU_TYPE)-, $(DEB_HOST_GNU_TYPE)-) -make-opts = ARCH=$(ARCH) KERNELRELEASE=$(KERNELRELEASE) KBUILD_BUILD_VERSION=$(revision) $(addprefix CROSS_COMPILE=,$(CROSS_COMPILE)) +make-opts = ARCH=$(ARCH) KERNELRELEASE=$(KERNELRELEASE) \ + $(addprefix KBUILD_BUILD_VERSION=,$(revision)) \ + $(addprefix CROSS_COMPILE=,$(CROSS_COMPILE))
binary-targets := $(addprefix binary-, image image-dbg headers libc-dev)
diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh index 24086793b0d8..db40237e60ce 100755 --- a/scripts/selinux/install_policy.sh +++ b/scripts/selinux/install_policy.sh @@ -6,27 +6,24 @@ if [ `id -u` -ne 0 ]; then exit 1 fi
-SF=`which setfiles` -if [ $? -eq 1 ]; then +SF=`which setfiles` || { echo "Could not find setfiles" echo "Do you have policycoreutils installed?" exit 1 -fi +}
-CP=`which checkpolicy` -if [ $? -eq 1 ]; then +CP=`which checkpolicy` || { echo "Could not find checkpolicy" echo "Do you have checkpolicy installed?" exit 1 -fi +} VERS=`$CP -V | awk '{print $1}'`
-ENABLED=`which selinuxenabled` -if [ $? -eq 1 ]; then +ENABLED=`which selinuxenabled` || { echo "Could not find selinuxenabled" echo "Do you have libselinux-utils installed?" exit 1 -fi +}
if selinuxenabled; then echo "SELinux is already enabled" diff --git a/security/smack/smack.h b/security/smack/smack.h index 4608b07607a3..c4d998972ba5 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -152,6 +152,7 @@ struct smk_net4addr { struct smack_known *smk_label; /* label */ };
+#if IS_ENABLED(CONFIG_IPV6) /* * An entry in the table identifying IPv6 hosts. */ @@ -162,7 +163,9 @@ struct smk_net6addr { int smk_masks; /* mask size */ struct smack_known *smk_label; /* label */ }; +#endif /* CONFIG_IPV6 */
+#ifdef SMACK_IPV6_PORT_LABELING /* * An entry in the table identifying ports. */ @@ -175,6 +178,7 @@ struct smk_port_label { short smk_sock_type; /* Socket type */ short smk_can_reuse; }; +#endif /* SMACK_IPV6_PORT_LABELING */
struct smack_known_list_elem { struct list_head list; @@ -315,7 +319,9 @@ extern struct smack_known smack_known_web; extern struct mutex smack_known_lock; extern struct list_head smack_known_list; extern struct list_head smk_net4addr_list; +#if IS_ENABLED(CONFIG_IPV6) extern struct list_head smk_net6addr_list; +#endif /* CONFIG_IPV6 */
extern struct mutex smack_onlycap_lock; extern struct list_head smack_onlycap_list; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 239773cdcdcf..e68c982e499e 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2492,6 +2492,7 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) return NULL; }
+#if IS_ENABLED(CONFIG_IPV6) /* * smk_ipv6_localhost - Check for local ipv6 host address * @sip: the address @@ -2559,6 +2560,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
return NULL; } +#endif /* CONFIG_IPV6 */
/** * smack_netlbl_add - Set the secattr on a socket @@ -2663,6 +2665,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap) return rc; }
+#if IS_ENABLED(CONFIG_IPV6) /** * smk_ipv6_check - check Smack access * @subject: subject Smack label @@ -2695,6 +2698,7 @@ static int smk_ipv6_check(struct smack_known *subject, rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); return rc; } +#endif /* CONFIG_IPV6 */
#ifdef SMACK_IPV6_PORT_LABELING /** @@ -3027,7 +3031,9 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return 0; if (addrlen < offsetofend(struct sockaddr, sa_family)) return 0; - if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { + +#if IS_ENABLED(CONFIG_IPV6) + if (sap->sa_family == AF_INET6) { struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; struct smack_known *rsp = NULL;
@@ -3047,6 +3053,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
return rc; } +#endif /* CONFIG_IPV6 */ + if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) return 0; rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap); @@ -4342,29 +4350,6 @@ static int smack_socket_getpeersec_dgram(struct socket *sock, return 0; }
-/** - * smack_sock_graft - Initialize a newly created socket with an existing sock - * @sk: child sock - * @parent: parent socket - * - * Set the smk_{in,out} state of an existing sock based on the process that - * is creating the new socket. - */ -static void smack_sock_graft(struct sock *sk, struct socket *parent) -{ - struct socket_smack *ssp; - struct smack_known *skp = smk_of_current(); - - if (sk == NULL || - (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)) - return; - - ssp = smack_sock(sk); - ssp->smk_in = skp; - ssp->smk_out = skp; - /* cssp->smk_packet is already set in smack_inet_csk_clone() */ -} - /** * smack_inet_conn_request - Smack access check on connect * @sk: socket involved @@ -5179,7 +5164,6 @@ static struct security_hook_list smack_hooks[] __ro_after_init = { LSM_HOOK_INIT(sk_free_security, smack_sk_free_security), #endif LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security), - LSM_HOOK_INIT(sock_graft, smack_sock_graft), LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request), LSM_HOOK_INIT(inet_csk_clone, smack_inet_csk_clone),
diff --git a/sound/core/timer.c b/sound/core/timer.c index fbada79380f9..d774b9b71ce2 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1515,91 +1515,97 @@ static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *ti id->subdevice = timer->tmr_subdevice; }
-static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) +static void get_next_device(struct snd_timer_id *id) { - struct snd_timer_id id; struct snd_timer *timer; struct list_head *p;
- if (copy_from_user(&id, _tid, sizeof(id))) - return -EFAULT; - guard(mutex)(&register_mutex); - if (id.dev_class < 0) { /* first item */ + if (id->dev_class < 0) { /* first item */ if (list_empty(&snd_timer_list)) - snd_timer_user_zero_id(&id); + snd_timer_user_zero_id(id); else { timer = list_entry(snd_timer_list.next, struct snd_timer, device_list); - snd_timer_user_copy_id(&id, timer); + snd_timer_user_copy_id(id, timer); } } else { - switch (id.dev_class) { + switch (id->dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: - id.device = id.device < 0 ? 0 : id.device + 1; + id->device = id->device < 0 ? 0 : id->device + 1; list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) { - snd_timer_user_copy_id(&id, timer); + snd_timer_user_copy_id(id, timer); break; } - if (timer->tmr_device >= id.device) { - snd_timer_user_copy_id(&id, timer); + if (timer->tmr_device >= id->device) { + snd_timer_user_copy_id(id, timer); break; } } if (p == &snd_timer_list) - snd_timer_user_zero_id(&id); + snd_timer_user_zero_id(id); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: - if (id.card < 0) { - id.card = 0; + if (id->card < 0) { + id->card = 0; } else { - if (id.device < 0) { - id.device = 0; + if (id->device < 0) { + id->device = 0; } else { - if (id.subdevice < 0) - id.subdevice = 0; - else if (id.subdevice < INT_MAX) - id.subdevice++; + if (id->subdevice < 0) + id->subdevice = 0; + else if (id->subdevice < INT_MAX) + id->subdevice++; } } list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); - if (timer->tmr_class > id.dev_class) { - snd_timer_user_copy_id(&id, timer); + if (timer->tmr_class > id->dev_class) { + snd_timer_user_copy_id(id, timer); break; } - if (timer->tmr_class < id.dev_class) + if (timer->tmr_class < id->dev_class) continue; - if (timer->card->number > id.card) { - snd_timer_user_copy_id(&id, timer); + if (timer->card->number > id->card) { + snd_timer_user_copy_id(id, timer); break; } - if (timer->card->number < id.card) + if (timer->card->number < id->card) continue; - if (timer->tmr_device > id.device) { - snd_timer_user_copy_id(&id, timer); + if (timer->tmr_device > id->device) { + snd_timer_user_copy_id(id, timer); break; } - if (timer->tmr_device < id.device) + if (timer->tmr_device < id->device) continue; - if (timer->tmr_subdevice > id.subdevice) { - snd_timer_user_copy_id(&id, timer); + if (timer->tmr_subdevice > id->subdevice) { + snd_timer_user_copy_id(id, timer); break; } - if (timer->tmr_subdevice < id.subdevice) + if (timer->tmr_subdevice < id->subdevice) continue; - snd_timer_user_copy_id(&id, timer); + snd_timer_user_copy_id(id, timer); break; } if (p == &snd_timer_list) - snd_timer_user_zero_id(&id); + snd_timer_user_zero_id(id); break; default: - snd_timer_user_zero_id(&id); + snd_timer_user_zero_id(id); } } +} + +static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) +{ + struct snd_timer_id id; + + if (copy_from_user(&id, _tid, sizeof(id))) + return -EFAULT; + scoped_guard(mutex, &register_mutex) + get_next_device(&id); if (copy_to_user(_tid, &id, sizeof(*_tid))) return -EFAULT; return 0; @@ -1620,23 +1626,24 @@ static int snd_timer_user_ginfo(struct file *file, tid = ginfo->tid; memset(ginfo, 0, sizeof(*ginfo)); ginfo->tid = tid; - guard(mutex)(&register_mutex); - t = snd_timer_find(&tid); - if (!t) - return -ENODEV; - ginfo->card = t->card ?
t->card->number : -1; - if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) - ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; - strscpy(ginfo->id, t->id, sizeof(ginfo->id)); - strscpy(ginfo->name, t->name, sizeof(ginfo->name)); - scoped_guard(spinlock_irq, &t->lock) - ginfo->resolution = snd_timer_hw_resolution(t); - if (t->hw.resolution_min > 0) { - ginfo->resolution_min = t->hw.resolution_min; - ginfo->resolution_max = t->hw.resolution_max; - } - list_for_each(p, &t->open_list_head) { - ginfo->clients++; + scoped_guard(mutex, ®ister_mutex) { + t = snd_timer_find(&tid); + if (!t) + return -ENODEV; + ginfo->card = t->card ? t->card->number : -1; + if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) + ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; + strscpy(ginfo->id, t->id, sizeof(ginfo->id)); + strscpy(ginfo->name, t->name, sizeof(ginfo->name)); + scoped_guard(spinlock_irq, &t->lock) + ginfo->resolution = snd_timer_hw_resolution(t); + if (t->hw.resolution_min > 0) { + ginfo->resolution_min = t->hw.resolution_min; + ginfo->resolution_max = t->hw.resolution_max; + } + list_for_each(p, &t->open_list_head) { + ginfo->clients++; + } } if (copy_to_user(_ginfo, ginfo, sizeof(*ginfo))) return -EFAULT; @@ -1674,31 +1681,31 @@ static int snd_timer_user_gstatus(struct file *file, struct snd_timer_gstatus gstatus; struct snd_timer_id tid; struct snd_timer *t; - int err = 0;
if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus))) return -EFAULT; tid = gstatus.tid; memset(&gstatus, 0, sizeof(gstatus)); gstatus.tid = tid; - guard(mutex)(®ister_mutex); - t = snd_timer_find(&tid); - if (t != NULL) { - guard(spinlock_irq)(&t->lock); - gstatus.resolution = snd_timer_hw_resolution(t); - if (t->hw.precise_resolution) { - t->hw.precise_resolution(t, &gstatus.resolution_num, - &gstatus.resolution_den); + scoped_guard(mutex, ®ister_mutex) { + t = snd_timer_find(&tid); + if (t != NULL) { + guard(spinlock_irq)(&t->lock); + gstatus.resolution = snd_timer_hw_resolution(t); + if (t->hw.precise_resolution) { + t->hw.precise_resolution(t, &gstatus.resolution_num, + &gstatus.resolution_den); + } else { + gstatus.resolution_num = gstatus.resolution; + gstatus.resolution_den = 1000000000uL; + } } else { - gstatus.resolution_num = gstatus.resolution; - gstatus.resolution_den = 1000000000uL; + return -ENODEV; } - } else { - err = -ENODEV; } - if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) - err = -EFAULT; - return err; + if (copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) + return -EFAULT; + return 0; }
static int snd_timer_user_tselect(struct file *file, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 78aab243c8b6..65ece19a6dd7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -586,6 +586,9 @@ static void alc_shutup_pins(struct hda_codec *codec) { struct alc_spec *spec = codec->spec;
+ if (spec->no_shutup_pins) + return; + switch (codec->core.vendor_id) { case 0x10ec0236: case 0x10ec0256: @@ -601,8 +604,7 @@ static void alc_shutup_pins(struct hda_codec *codec) alc_headset_mic_no_shutup(codec); break; default: - if (!spec->no_shutup_pins) - snd_hda_shutup_pins(codec); + snd_hda_shutup_pins(codec); break; } } @@ -10700,6 +10702,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x106f, "ASUS VivoBook X515UA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1074, "ASUS G614PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x10a4, "ASUS TP3407SA", ALC287_FIXUP_TAS2781_I2C), @@ -10733,6 +10736,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x14f2, "ASUS VivoBook X515JA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2), @@ -10772,6 +10776,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1c63, "ASUS GU605M", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1), + SND_PCI_QUIRK(0x1043, 0x1c80, "ASUS VivoBook TP401", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), diff --git a/sound/soc/amd/acp/acp-legacy-common.c b/sound/soc/amd/acp/acp-legacy-common.c index 7acc7ed2e8cc..b9f085c560c2 100644 --- a/sound/soc/amd/acp/acp-legacy-common.c +++ b/sound/soc/amd/acp/acp-legacy-common.c @@ -13,6 +13,7 @@ */
#include "amd.h" +#include <linux/acpi.h> #include <linux/pci.h> #include <linux/export.h>
@@ -445,7 +446,9 @@ void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip) { struct acpi_device *pdm_dev; const union acpi_object *obj; - u32 pdm_addr; + acpi_handle handle; + acpi_integer dmic_status; + u32 pdm_addr, ret;
switch (chip->acp_rev) { case ACP_RN_PCI_ID: @@ -477,6 +480,11 @@ void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip) obj->integer.value == pdm_addr) chip->is_pdm_dev = true; } + + handle = ACPI_HANDLE(&pci->dev); + ret = acpi_evaluate_integer(handle, "_WOV", NULL, &dmic_status); + if (!ACPI_FAILURE(ret)) + chip->is_pdm_dev = dmic_status; } } EXPORT_SYMBOL_NS_GPL(check_acp_config, "SND_SOC_ACP_COMMON"); diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c index a6db44520c06..f9b6bf7bea9c 100644 --- a/sound/soc/codecs/cs35l41-spi.c +++ b/sound/soc/codecs/cs35l41-spi.c @@ -32,13 +32,16 @@ static int cs35l41_spi_probe(struct spi_device *spi) const struct regmap_config *regmap_config = &cs35l41_regmap_spi; struct cs35l41_hw_cfg *hw_cfg = dev_get_platdata(&spi->dev); struct cs35l41_private *cs35l41; + int ret;
cs35l41 = devm_kzalloc(&spi->dev, sizeof(struct cs35l41_private), GFP_KERNEL); if (!cs35l41) return -ENOMEM;
     spi->max_speed_hz = CS35L41_SPI_MAX_FREQ;
-    spi_setup(spi);
+    ret = spi_setup(spi);
+    if (ret < 0)
+        return ret;
spi_set_drvdata(spi, cs35l41); cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config); diff --git a/sound/soc/codecs/mt6359.c b/sound/soc/codecs/mt6359.c index 0b76a55664b0..f73120c6a6ce 100644 --- a/sound/soc/codecs/mt6359.c +++ b/sound/soc/codecs/mt6359.c @@ -2867,9 +2867,12 @@ static int mt6359_parse_dt(struct mt6359_priv *priv) struct device *dev = priv->dev; struct device_node *np;
-    np = of_get_child_by_name(dev->parent->of_node, "mt6359codec");
-    if (!np)
-        return -EINVAL;
+    np = of_get_child_by_name(dev->parent->of_node, "audio-codec");
+    if (!np) {
+        np = of_get_child_by_name(dev->parent->of_node, "mt6359codec");
+        if (!np)
+            return -EINVAL;
+    }
ret = of_property_read_u32(np, "mediatek,dmic-mode", &priv->dmic_one_wire_mode); diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c index 47df14ba5278..4f0236b34a2d 100644 --- a/sound/soc/codecs/rt5665.c +++ b/sound/soc/codecs/rt5665.c @@ -31,9 +31,7 @@ #include "rl6231.h" #include "rt5665.h"
-#define RT5665_NUM_SUPPLIES 3 - -static const char *rt5665_supply_names[RT5665_NUM_SUPPLIES] = { +static const char * const rt5665_supply_names[] = { "AVDD", "MICVDD", "VBAT", @@ -46,7 +44,6 @@ struct rt5665_priv { struct gpio_desc *gpiod_ldo1_en; struct gpio_desc *gpiod_reset; struct snd_soc_jack *hs_jack; - struct regulator_bulk_data supplies[RT5665_NUM_SUPPLIES]; struct delayed_work jack_detect_work; struct delayed_work calibrate_work; struct delayed_work jd_check_work; @@ -4471,8 +4468,6 @@ static void rt5665_remove(struct snd_soc_component *component) struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
regmap_write(rt5665->regmap, RT5665_RESET, 0); - - regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies); }
#ifdef CONFIG_PM @@ -4758,7 +4753,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c) { struct rt5665_platform_data *pdata = dev_get_platdata(&i2c->dev); struct rt5665_priv *rt5665; - int i, ret; + int ret; unsigned int val;
rt5665 = devm_kzalloc(&i2c->dev, sizeof(struct rt5665_priv), @@ -4774,24 +4769,13 @@ static int rt5665_i2c_probe(struct i2c_client *i2c) else rt5665_parse_dt(rt5665, &i2c->dev);
- for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++) - rt5665->supplies[i].supply = rt5665_supply_names[i]; - - ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies), - rt5665->supplies); + ret = devm_regulator_bulk_get_enable(&i2c->dev, ARRAY_SIZE(rt5665_supply_names), + rt5665_supply_names); if (ret != 0) { dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret); return ret; }
- ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies), - rt5665->supplies); - if (ret != 0) { - dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret); - return ret; - } - - rt5665->gpiod_ldo1_en = devm_gpiod_get_optional(&i2c->dev, "realtek,ldo1-en", GPIOD_OUT_HIGH); diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c index ac043ad367ac..21f617f6f9fa 100644 --- a/sound/soc/fsl/imx-card.c +++ b/sound/soc/fsl/imx-card.c @@ -767,6 +767,8 @@ static int imx_card_probe(struct platform_device *pdev) data->dapm_routes[i].sink = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s", i + 1, "Playback"); + if (!data->dapm_routes[i].sink) + return -ENOMEM; data->dapm_routes[i].source = "CPU-Playback"; } } @@ -784,6 +786,8 @@ static int imx_card_probe(struct platform_device *pdev) data->dapm_routes[i].source = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s", i + 1, "Capture"); + if (!data->dapm_routes[i].source) + return -ENOMEM; data->dapm_routes[i].sink = "CPU-Capture"; } } diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c index c2445c5ccd84..32efb30c55d6 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -1077,6 +1077,7 @@ static int graph_get_dai_id(struct device_node *ep) int graph_util_parse_dai(struct device *dev, struct device_node *ep, struct snd_soc_dai_link_component *dlc, int *is_single_link) { + struct device_node *node; struct of_phandle_args args = {}; struct snd_soc_dai *dai; int ret; @@ -1084,7 +1085,7 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep, if (!ep) return 0;
- struct device_node *node __free(device_node) = of_graph_get_port_parent(ep); + node = of_graph_get_port_parent(ep);
/* * Try to find from DAI node @@ -1126,8 +1127,10 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep, * if he unbinded CPU or Codec. */ ret = snd_soc_get_dlc(&args, dlc); - if (ret < 0) + if (ret < 0) { + of_node_put(node); return ret; + }
parse_dai_end: if (is_single_link) diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c index 0aa93b948378..3c10e09976ad 100644 --- a/sound/soc/tegra/tegra210_adx.c +++ b/sound/soc/tegra/tegra210_adx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -// SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. +// SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. // All rights reserved. // // tegra210_adx.c - Tegra210 ADX driver @@ -57,8 +57,8 @@ static int tegra210_adx_startup(struct snd_pcm_substream *substream, int err;
     /* Ensure if ADX status is disabled */
-    err = regmap_read_poll_timeout_atomic(adx->regmap, TEGRA210_ADX_STATUS,
-                          val, !(val & 0x1), 10, 10000);
+    err = regmap_read_poll_timeout(adx->regmap, TEGRA210_ADX_STATUS,
+                       val, !(val & 0x1), 10, 10000);
     if (err < 0) {
         dev_err(dai->dev, "failed to stop ADX, err = %d\n", err);
         return err;
     }
diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c
index d9d1e021f5b2..0f96cc45578d 100644
--- a/sound/soc/ti/j721e-evm.c
+++ b/sound/soc/ti/j721e-evm.c
@@ -182,6 +182,8 @@ static int j721e_configure_refclk(struct j721e_priv *priv,
         clk_id = J721E_CLK_PARENT_48000;
     else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_44100])
         clk_id = J721E_CLK_PARENT_44100;
+    else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_48000])
+        clk_id = J721E_CLK_PARENT_48000;
     else
         return ret;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 3d36d22f8e9e..62b28e9d83c7 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -3688,8 +3688,7 @@ static const char *snd_djm_get_label(u8 device_idx, u16 wvalue, u16 windex)
// common DJM capture level option values static const u16 snd_djm_opts_cap_level[] = { - 0x0000, 0x0100, 0x0200, 0x0300, 0x400, 0x500 }; - + 0x0000, 0x0100, 0x0200, 0x0300 };
// DJM-250MK2 static const u16 snd_djm_opts_250mk2_cap1[] = { @@ -3831,6 +3830,8 @@ static const struct snd_djm_ctl snd_djm_ctls_750mk2[] = {
// DJM-A9 +static const u16 snd_djm_opts_a9_cap_level[] = { + 0x0000, 0x0100, 0x0200, 0x0300, 0x0400, 0x0500 }; static const u16 snd_djm_opts_a9_cap1[] = { 0x0107, 0x0108, 0x0109, 0x010a, 0x010e, 0x111, 0x112, 0x113, 0x114, 0x0131, 0x132, 0x133, 0x134 }; @@ -3844,7 +3845,7 @@ static const u16 snd_djm_opts_a9_cap5[] = { 0x0501, 0x0502, 0x0503, 0x0505, 0x0506, 0x0507, 0x0508, 0x0509, 0x050a, 0x050e };
static const struct snd_djm_ctl snd_djm_ctls_a9[] = { - SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL), + SND_DJM_CTL("Capture Level", a9_cap_level, 0, SND_DJM_WINDEX_CAPLVL), SND_DJM_CTL("Master Input", a9_cap1, 3, SND_DJM_WINDEX_CAP), SND_DJM_CTL("Ch1 Input", a9_cap2, 2, SND_DJM_WINDEX_CAP), SND_DJM_CTL("Ch2 Input", a9_cap3, 2, SND_DJM_WINDEX_CAP), diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c index ab5cdc3337da..e91d4c4e1c16 100644 --- a/tools/arch/x86/lib/insn.c +++ b/tools/arch/x86/lib/insn.c @@ -13,7 +13,7 @@ #endif #include "../include/asm/inat.h" /* __ignore_sync_check__ */ #include "../include/asm/insn.h" /* __ignore_sync_check__ */ -#include "../include/linux/unaligned.h" /* __ignore_sync_check__ */ +#include <linux/unaligned.h> /* __ignore_sync_check__ */
#include <linux/errno.h> #include <linux/kconfig.h> diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile index e49203ebd48c..78a436c4072e 100644 --- a/tools/bpf/runqslower/Makefile +++ b/tools/bpf/runqslower/Makefile @@ -6,6 +6,7 @@ OUTPUT ?= $(abspath .output)/ BPFTOOL_OUTPUT := $(OUTPUT)bpftool/ DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)bootstrap/bpftool BPFTOOL ?= $(DEFAULT_BPFTOOL) +BPF_TARGET_ENDIAN ?= --target=bpf LIBBPF_SRC := $(abspath ../../lib/bpf) BPFOBJ_OUTPUT := $(OUTPUT)libbpf/ BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a @@ -60,7 +61,7 @@ $(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL) $(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@
$(OUTPUT)/%.bpf.o: %.bpf.c $(BPFOBJ) | $(OUTPUT) - $(QUIET_GEN)$(CLANG) -g -O2 --target=bpf $(INCLUDES) \ + $(QUIET_GEN)$(CLANG) -g -O2 $(BPF_TARGET_ENDIAN) $(INCLUDES) \ -c $(filter %.c,$^) -o $@ && \ $(LLVM_STRIP) -g $@
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h index 42ec5ddaab8d..42869770776e 100644 --- a/tools/include/uapi/linux/if_xdp.h +++ b/tools/include/uapi/linux/if_xdp.h @@ -127,6 +127,12 @@ struct xdp_options { */ #define XDP_TXMD_FLAGS_CHECKSUM (1 << 1)
+/* Request launch time hardware offload. The device will schedule the packet for + * transmission at a pre-determined time called launch time. The value of + * launch time is communicated via launch_time field of struct xsk_tx_metadata. + */ +#define XDP_TXMD_FLAGS_LAUNCH_TIME (1 << 2) + /* AF_XDP offloads request. 'request' union member is consumed by the driver * when the packet is being transmitted. 'completion' union member is * filled by the driver when the transmit completion arrives. @@ -142,6 +148,10 @@ struct xsk_tx_metadata { __u16 csum_start; /* Offset from csum_start where checksum should be stored. */ __u16 csum_offset; + + /* XDP_TXMD_FLAGS_LAUNCH_TIME */ + /* Launch time in nanosecond against the PTP HW Clock */ + __u64 launch_time; } request;
struct { diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h index e4be227d3ad6..4324e89a8026 100644 --- a/tools/include/uapi/linux/netdev.h +++ b/tools/include/uapi/linux/netdev.h @@ -59,10 +59,13 @@ enum netdev_xdp_rx_metadata { * by the driver. * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the * driver. + * @NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO: Launch time HW offload is supported + * by the driver. */ enum netdev_xsk_flags { NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, + NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO = 4, };
enum netdev_queue_type { diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 48c66f3a9200..560b519f820e 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -3015,8 +3015,6 @@ static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native) .desc = "line_info", }; struct btf_ext_sec_info_param core_relo = { - .off = btf_ext->hdr->core_relo_off, - .len = btf_ext->hdr->core_relo_len, .min_rec_size = sizeof(struct bpf_core_relo), .ext_info = &btf_ext->core_relo_info, .desc = "core_relo", @@ -3034,6 +3032,8 @@ static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native) if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) return 0; /* skip core relos parsing */
+ core_relo.off = btf_ext->hdr->core_relo_off; + core_relo.len = btf_ext->hdr->core_relo_len; err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native); if (err) return err; diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index b52f71c59616..800e0ef09c37 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -2163,7 +2163,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
obj->sym_map[src_sym_idx] = dst_sym_idx;
- if (sym_type == STT_SECTION && dst_sym) { + if (sym_type == STT_SECTION && dst_sec) { dst_sec->sec_sym_idx = dst_sym_idx; dst_sym->st_value = 0; } diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c index 8743049e32b7..9a541762f54c 100644 --- a/tools/lib/bpf/str_error.c +++ b/tools/lib/bpf/str_error.c @@ -36,7 +36,7 @@ char *libbpf_strerror_r(int err, char *dst, int len) return dst; }
-const char *errstr(int err) +const char *libbpf_errstr(int err) { static __thread char buf[12];
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h index 66ffebde0684..53e7fbffc13e 100644 --- a/tools/lib/bpf/str_error.h +++ b/tools/lib/bpf/str_error.h @@ -7,10 +7,13 @@ char *libbpf_strerror_r(int err, char *dst, int len);
/** - * @brief **errstr()** returns string corresponding to numeric errno + * @brief **libbpf_errstr()** returns string corresponding to numeric errno * @param err negative numeric errno * @return pointer to string representation of the errno, that is invalidated * upon the next call. */ -const char *errstr(int err); +const char *libbpf_errstr(int err); + +#define errstr(err) libbpf_errstr(err) + #endif /* __LIBBPF_STR_ERROR_H */ diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c index 69b66994f2a1..02e490555966 100644 --- a/tools/objtool/arch/loongarch/decode.c +++ b/tools/objtool/arch/loongarch/decode.c @@ -5,10 +5,7 @@ #include <asm/inst.h> #include <asm/orc_types.h> #include <linux/objtool_types.h> - -#ifndef EM_LOONGARCH -#define EM_LOONGARCH 258 -#endif +#include <arch/elf.h>
int arch_ftrace_match(char *name) { @@ -363,3 +360,26 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state) state->cfa.base = CFI_SP; state->cfa.offset = 0; } + +unsigned int arch_reloc_size(struct reloc *reloc) +{ + switch (reloc_type(reloc)) { + case R_LARCH_32: + case R_LARCH_32_PCREL: + return 4; + default: + return 8; + } +} + +unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table) +{ + switch (reloc_type(reloc)) { + case R_LARCH_32_PCREL: + case R_LARCH_64_PCREL: + return reloc->sym->offset + reloc_addend(reloc) - + (reloc_offset(reloc) - reloc_offset(table)); + default: + return reloc->sym->offset + reloc_addend(reloc); + } +} diff --git a/tools/objtool/arch/loongarch/include/arch/elf.h b/tools/objtool/arch/loongarch/include/arch/elf.h index 9623d663220e..ec79062c9554 100644 --- a/tools/objtool/arch/loongarch/include/arch/elf.h +++ b/tools/objtool/arch/loongarch/include/arch/elf.h @@ -18,6 +18,13 @@ #ifndef R_LARCH_32_PCREL #define R_LARCH_32_PCREL 99 #endif +#ifndef R_LARCH_64_PCREL +#define R_LARCH_64_PCREL 109 +#endif + +#ifndef EM_LOONGARCH +#define EM_LOONGARCH 258 +#endif
#define R_NONE R_LARCH_NONE #define R_ABS32 R_LARCH_32 diff --git a/tools/objtool/arch/powerpc/decode.c b/tools/objtool/arch/powerpc/decode.c index 53b55690f320..7c0bf2429067 100644 --- a/tools/objtool/arch/powerpc/decode.c +++ b/tools/objtool/arch/powerpc/decode.c @@ -106,3 +106,17 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state) state->regs[CFI_RA].base = CFI_CFA; state->regs[CFI_RA].offset = 0; } + +unsigned int arch_reloc_size(struct reloc *reloc) +{ + switch (reloc_type(reloc)) { + case R_PPC_REL32: + case R_PPC_ADDR32: + case R_PPC_UADDR32: + case R_PPC_PLT32: + case R_PPC_PLTREL32: + return 4; + default: + return 8; + } +} diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index fe1362c34564..fb9691a34d92 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -852,3 +852,16 @@ bool arch_is_embedded_insn(struct symbol *sym) return !strcmp(sym->name, "retbleed_return_thunk") || !strcmp(sym->name, "srso_safe_ret"); } + +unsigned int arch_reloc_size(struct reloc *reloc) +{ + switch (reloc_type(reloc)) { + case R_X86_64_32: + case R_X86_64_32S: + case R_X86_64_PC32: + case R_X86_64_PLT32: + return 4; + default: + return 8; + } +} diff --git a/tools/objtool/check.c b/tools/objtool/check.c index ce973d9d8e6d..159fb130e282 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1944,8 +1944,12 @@ static int add_special_section_alts(struct objtool_file *file) return ret; }
-static int add_jump_table(struct objtool_file *file, struct instruction *insn, - struct reloc *next_table) +__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table) +{ + return reloc->sym->offset + reloc_addend(reloc); +} + +static int add_jump_table(struct objtool_file *file, struct instruction *insn) { unsigned long table_size = insn_jump_table_size(insn); struct symbol *pfunc = insn_func(insn)->pfunc; @@ -1954,6 +1958,7 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn, unsigned int prev_offset = 0; struct reloc *reloc = table; struct alternative *alt; + unsigned long sym_offset;
/* * Each @reloc is a switch table relocation which points to the target @@ -1964,16 +1969,17 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn, /* Check for the end of the table: */ if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size) break; - if (reloc != table && reloc == next_table) + if (reloc != table && is_jump_table(reloc)) break;
/* Make sure the table entries are consecutive: */ - if (prev_offset && reloc_offset(reloc) != prev_offset + 8) + if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc)) break;
+ sym_offset = arch_jump_table_sym_offset(reloc, table); + /* Detect function pointers from contiguous objects: */ - if (reloc->sym->sec == pfunc->sec && - reloc_addend(reloc) == pfunc->offset) + if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset) break;
/* @@ -1981,10 +1987,10 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn, * which point to the end of the function. Ignore them. */ if (reloc->sym->sec == pfunc->sec && - reloc_addend(reloc) == pfunc->offset + pfunc->len) + sym_offset == pfunc->offset + pfunc->len) goto next;
- dest_insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc)); + dest_insn = find_insn(file, reloc->sym->sec, sym_offset); if (!dest_insn) break;
@@ -2023,6 +2029,7 @@ static void find_jump_table(struct objtool_file *file, struct symbol *func, struct reloc *table_reloc; struct instruction *dest_insn, *orig_insn = insn; unsigned long table_size; + unsigned long sym_offset;
/* * Backward search using the @first_jump_src links, these help avoid @@ -2046,12 +2053,17 @@ static void find_jump_table(struct objtool_file *file, struct symbol *func, table_reloc = arch_find_switch_table(file, insn, &table_size); if (!table_reloc) continue; - dest_insn = find_insn(file, table_reloc->sym->sec, reloc_addend(table_reloc)); + + sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc); + + dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset); if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func) continue;
+ set_jump_table(table_reloc); orig_insn->_jump_table = table_reloc; orig_insn->_jump_table_size = table_size; + break; } } @@ -2093,31 +2105,20 @@ static void mark_func_jump_tables(struct objtool_file *file, static int add_func_jump_tables(struct objtool_file *file, struct symbol *func) { - struct instruction *insn, *insn_t1 = NULL, *insn_t2; - int ret = 0; + struct instruction *insn; + int ret;
func_for_each_insn(file, func, insn) { if (!insn_jump_table(insn)) continue;
- if (!insn_t1) { - insn_t1 = insn; - continue; - }
- insn_t2 = insn; - - ret = add_jump_table(file, insn_t1, insn_jump_table(insn_t2)); + ret = add_jump_table(file, insn); if (ret) return ret; - - insn_t1 = insn_t2; }
- if (insn_t1) - ret = add_jump_table(file, insn_t1, NULL); - - return ret; + return 0; }
/* @@ -4008,7 +4009,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio * It may also insert a UD2 after calling a __noreturn function. */ prev_insn = prev_insn_same_sec(file, insn); - if (prev_insn->dead_end && + if (prev_insn && prev_insn->dead_end && (insn->type == INSN_BUG || (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && insn->jump_dest->type == INSN_BUG))) @@ -4449,35 +4450,6 @@ static int validate_sls(struct objtool_file *file) return warnings; }
-static bool ignore_noreturn_call(struct instruction *insn) -{ - struct symbol *call_dest = insn_call_dest(insn); - - /* - * FIXME: hack, we need a real noreturn solution - * - * Problem is, exc_double_fault() may or may not return, depending on - * whether CONFIG_X86_ESPFIX64 is set. But objtool has no visibility - * to the kernel config. - * - * Other potential ways to fix it: - * - * - have compiler communicate __noreturn functions somehow - * - remove CONFIG_X86_ESPFIX64 - * - read the .config file - * - add a cmdline option - * - create a generic objtool annotation format (vs a bunch of custom - * formats) and annotate it - */ - if (!strcmp(call_dest->name, "exc_double_fault")) { - /* prevent further unreachable warnings for the caller */ - insn->sym->warned = 1; - return true; - } - - return false; -} - static int validate_reachable_instructions(struct objtool_file *file) { struct instruction *insn, *prev_insn; @@ -4494,7 +4466,7 @@ static int validate_reachable_instructions(struct objtool_file *file) prev_insn = prev_insn_same_sec(file, insn); if (prev_insn && prev_insn->dead_end) { call_dest = insn_call_dest(prev_insn); - if (call_dest && !ignore_noreturn_call(prev_insn)) { + if (call_dest) { WARN_INSN(insn, "%s() is missing a __noreturn annotation", call_dest->name); warnings++; @@ -4517,6 +4489,8 @@ static int disas_funcs(const char *funcs) char *cmd;
     cross_compile = getenv("CROSS_COMPILE");
+    if (!cross_compile)
+        cross_compile = "";
objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '" "BEGIN { split(_funcs, funcs); }" diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 6f64d611faea..934855be631c 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -583,7 +583,7 @@ static int elf_update_sym_relocs(struct elf *elf, struct symbol *sym) { struct reloc *reloc;
- for (reloc = sym->relocs; reloc; reloc = reloc->sym_next_reloc) + for (reloc = sym->relocs; reloc; reloc = sym_next_reloc(reloc)) set_reloc_sym(elf, reloc, reloc->sym->idx);
return 0; @@ -880,7 +880,7 @@ static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec, set_reloc_addend(elf, reloc, addend);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc)); - reloc->sym_next_reloc = sym->relocs; + set_sym_next_reloc(reloc, sym->relocs); sym->relocs = reloc;
return reloc; @@ -979,7 +979,7 @@ static int read_relocs(struct elf *elf) }
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc)); - reloc->sym_next_reloc = sym->relocs; + set_sym_next_reloc(reloc, sym->relocs); sym->relocs = reloc;
nr_reloc++; diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index d63b46a19f39..089a1acc48a8 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -97,4 +97,7 @@ int arch_rewrite_retpolines(struct objtool_file *file);
bool arch_pc_relative_reloc(struct reloc *reloc);
+unsigned int arch_reloc_size(struct reloc *reloc); +unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table); + #endif /* _ARCH_H */ diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index d7e815c2fd15..764cba535f22 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -77,7 +77,7 @@ struct reloc { struct elf_hash_node hash; struct section *sec; struct symbol *sym; - struct reloc *sym_next_reloc; + unsigned long _sym_next_reloc; };
struct elf { @@ -297,6 +297,31 @@ static inline void set_reloc_type(struct elf *elf, struct reloc *reloc, unsigned mark_sec_changed(elf, reloc->sec, true); }
+#define RELOC_JUMP_TABLE_BIT 1UL + +/* Does reloc mark the beginning of a jump table? */ +static inline bool is_jump_table(struct reloc *reloc) +{ + return reloc->_sym_next_reloc & RELOC_JUMP_TABLE_BIT; +} + +static inline void set_jump_table(struct reloc *reloc) +{ + reloc->_sym_next_reloc |= RELOC_JUMP_TABLE_BIT; +} + +static inline struct reloc *sym_next_reloc(struct reloc *reloc) +{ + return (struct reloc *)(reloc->_sym_next_reloc & ~RELOC_JUMP_TABLE_BIT); +} + +static inline void set_sym_next_reloc(struct reloc *reloc, struct reloc *next) +{ + unsigned long bit = reloc->_sym_next_reloc & RELOC_JUMP_TABLE_BIT; + + reloc->_sym_next_reloc = (unsigned long)next | bit; +} + #define for_each_sec(file, sec) \ list_for_each_entry(sec, &file->elf->sections, list)
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index a148ca9efca9..23dbb6bb91cf 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -497,13 +497,14 @@ ifeq ($(feature-setns), 1) $(call detected,CONFIG_SETNS) endif
+ifeq ($(feature-reallocarray), 0) + CFLAGS += -DCOMPAT_NEED_REALLOCARRAY +endif + ifdef CORESIGHT $(call feature_check,libopencsd) ifeq ($(feature-libopencsd), 1) CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS) - ifeq ($(feature-reallocarray), 0) - CFLAGS += -DCOMPAT_NEED_REALLOCARRAY - endif LDFLAGS += $(LIBOPENCSD_LDFLAGS) EXTLIBS += $(OPENCSDLIBS) $(call detected,CONFIG_LIBOPENCSD) @@ -1103,9 +1104,6 @@ ifndef NO_AUXTRACE ifndef NO_AUXTRACE $(call detected,CONFIG_AUXTRACE) CFLAGS += -DHAVE_AUXTRACE_SUPPORT - ifeq ($(feature-reallocarray), 0) - CFLAGS += -DCOMPAT_NEED_REALLOCARRAY - endif endif endif
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 05c083bb1122..eea8877c7cba 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -158,7 +158,7 @@ ifneq ($(OUTPUT),) VPATH += $(OUTPUT) export VPATH # create symlink to the original source -SOURCE := $(shell ln -sf $(srctree)/tools/perf $(OUTPUT)/source) +SOURCE := $(shell ln -sfn $(srctree)/tools/perf $(OUTPUT)/source) endif
# Do not use make's built-in rules diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c index c7df534dbf8f..0be74f048f96 100644 --- a/tools/perf/arch/powerpc/util/header.c +++ b/tools/perf/arch/powerpc/util/header.c @@ -14,8 +14,8 @@
 static bool is_compat_mode(void)
 {
-    u64 base_platform = getauxval(AT_BASE_PLATFORM);
-    u64 platform = getauxval(AT_PLATFORM);
+    unsigned long base_platform = getauxval(AT_BASE_PLATFORM);
+    unsigned long platform = getauxval(AT_PLATFORM);
if (!strcmp((char *)platform, (char *)base_platform)) return false; diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c index f63747d0abdf..d1c654839049 100644 --- a/tools/perf/arch/x86/util/topdown.c +++ b/tools/perf/arch/x86/util/topdown.c @@ -81,7 +81,7 @@ bool arch_topdown_sample_read(struct evsel *leader) */ evlist__for_each_entry(leader->evlist, evsel) { if (evsel->core.leader != leader->core.leader) - return false; + continue; if (evsel != leader && arch_is_topdown_metrics(evsel)) return true; } diff --git a/tools/perf/bench/syscall.c b/tools/perf/bench/syscall.c index ea4dfc07cbd6..e7dc216f717f 100644 --- a/tools/perf/bench/syscall.c +++ b/tools/perf/bench/syscall.c @@ -22,8 +22,7 @@ #define __NR_fork -1 #endif
-#define LOOPS_DEFAULT 10000000
-static int loops = LOOPS_DEFAULT;
+static int loops;
static const struct option options[] = { OPT_INTEGER('l', "loop", &loops, "Specify number of loops"), @@ -80,6 +79,18 @@ static int bench_syscall_common(int argc, const char **argv, int syscall) const char *name = NULL; int i;
+    switch (syscall) {
+    case __NR_fork:
+    case __NR_execve:
+        /* Limit default loop to 10000 times to save time */
+        loops = 10000;
+        break;
+    default:
+        loops = 10000000;
+        break;
+    }
+
+    /* Options -l and --loops override default above */
     argc = parse_options(argc, argv, options, bench_syscall_usage, 0);
gettimeofday(&start, NULL); @@ -94,16 +105,9 @@ static int bench_syscall_common(int argc, const char **argv, int syscall) break; case __NR_fork: test_fork(); - /* Only loop 10000 times to save time */ - if (i == 10000) - loops = 10000; break; case __NR_execve: test_execve(); - /* Only loop 10000 times to save time */ - if (i == 10000) - loops = 10000; - break; default: break; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f5fbd670d619..19175fe9a8b1 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1553,12 +1553,12 @@ int cmd_report(int argc, const char **argv) input_name = "perf.data"; }
+repeat: data.path = input_name; data.force = symbol_conf.force;
symbol_conf.skip_empty = report.skip_empty;
-repeat: perf_tool__init(&report.tool, ordered_events); report.tool.sample = process_sample_event; report.tool.mmap = perf_event__process_mmap; @@ -1719,22 +1719,24 @@ int cmd_report(int argc, const char **argv) symbol_conf.annotate_data_sample = true; }
- if (sort_order && strstr(sort_order, "ipc")) { - parse_options_usage(report_usage, options, "s", 1); - goto error; - } - - if (sort_order && strstr(sort_order, "symbol")) { - if (sort__mode == SORT_MODE__BRANCH) { - snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s", - sort_order, "ipc_lbr"); - report.symbol_ipc = true; - } else { - snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s", - sort_order, "ipc_null"); + if (last_key != K_SWITCH_INPUT_DATA) { + if (sort_order && strstr(sort_order, "ipc")) { + parse_options_usage(report_usage, options, "s", 1); + goto error; }
- sort_order = sort_tmp; + if (sort_order && strstr(sort_order, "symbol")) { + if (sort__mode == SORT_MODE__BRANCH) { + snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s", + sort_order, "ipc_lbr"); + report.symbol_ipc = true; + } else { + snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s", + sort_order, "ipc_null"); + } + + sort_order = sort_tmp; + } }
if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) && diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json index c5d1d22bd034..5228f94a793f 100644 --- a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json @@ -229,19 +229,19 @@ }, { "MetricName": "slots_lost_misspeculation_fraction", - "MetricExpr": "(OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)", + "MetricExpr": "100 * (OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)", "BriefDescription": "Fraction of slots lost due to misspeculation", "DefaultMetricgroupName": "TopdownL1", "MetricGroup": "Default;TopdownL1", - "ScaleUnit": "100percent of slots" + "ScaleUnit": "1percent of slots" }, { "MetricName": "retired_fraction", - "MetricExpr": "OP_RETIRED / (CPU_CYCLES * #slots)", + "MetricExpr": "100 * OP_RETIRED / (CPU_CYCLES * #slots)", "BriefDescription": "Fraction of slots retiring, useful work", "DefaultMetricgroupName": "TopdownL1", "MetricGroup": "Default;TopdownL1", - "ScaleUnit": "100percent of slots" + "ScaleUnit": "1percent of slots" }, { "MetricName": "backend_core", @@ -266,7 +266,7 @@ }, { "MetricName": "frontend_bandwidth", - "MetricExpr": "frontend_bound - frontend_latency", + "MetricExpr": "frontend_bound - 100 * frontend_latency", "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)", "MetricGroup": "TopdownL2", "ScaleUnit": "1percent of slots" diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c index 1c7a2cfa321f..0cb7ba7912e8 100644 --- a/tools/perf/pmu-events/empty-pmu-events.c +++ b/tools/perf/pmu-events/empty-pmu-events.c @@ -422,7 +422,7 @@ int pmu_events_table__for_each_event(const struct pmu_events_table *table, const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; int ret;
- if (pmu && !pmu__name_match(pmu, pmu_name)) + if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name)) continue;
ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data); @@ -443,7 +443,7 @@ int pmu_events_table__find_event(const struct pmu_events_table *table, const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; int ret;
- if (!pmu__name_match(pmu, pmu_name)) + if (!perf_pmu__name_wildcard_match(pmu, pmu_name)) continue;
ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data); @@ -462,7 +462,7 @@ size_t pmu_events_table__num_events(const struct pmu_events_table *table, const struct pmu_table_entry *table_pmu = &table->pmus[i]; const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
- if (pmu__name_match(pmu, pmu_name)) + if (perf_pmu__name_wildcard_match(pmu, pmu_name)) count += table_pmu->num_entries; } return count; @@ -581,7 +581,7 @@ const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu) const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i]; const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
- if (pmu__name_match(pmu, pmu_name)) + if (perf_pmu__name_wildcard_match(pmu, pmu_name)) return &map->event_table; } return NULL; diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index 3e204700b59a..7499a35bfadd 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -945,7 +945,7 @@ int pmu_events_table__for_each_event(const struct pmu_events_table *table, const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; int ret;
- if (pmu && !pmu__name_match(pmu, pmu_name)) + if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name)) continue;
ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data); @@ -966,7 +966,7 @@ int pmu_events_table__find_event(const struct pmu_events_table *table, const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; int ret;
- if (!pmu__name_match(pmu, pmu_name)) + if (!perf_pmu__name_wildcard_match(pmu, pmu_name)) continue;
ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data); @@ -985,7 +985,7 @@ size_t pmu_events_table__num_events(const struct pmu_events_table *table, const struct pmu_table_entry *table_pmu = &table->pmus[i]; const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
- if (pmu__name_match(pmu, pmu_name)) + if (perf_pmu__name_wildcard_match(pmu, pmu_name)) count += table_pmu->num_entries; } return count; @@ -1104,7 +1104,7 @@ const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu) const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i]; const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
- if (pmu__name_match(pmu, pmu_name)) + if (perf_pmu__name_wildcard_match(pmu, pmu_name)) return &map->event_table; } return NULL; diff --git a/tools/perf/tests/hwmon_pmu.c b/tools/perf/tests/hwmon_pmu.c index d2b066a2b557..0837aca1cdfa 100644 --- a/tools/perf/tests/hwmon_pmu.c +++ b/tools/perf/tests/hwmon_pmu.c @@ -13,17 +13,23 @@ static const struct test_event { const char *name; const char *alias; - long config; + union hwmon_pmu_event_key key; } test_events[] = { { "temp_test_hwmon_event1", "temp1", - 0xA0001, + .key = { + .num = 1, + .type = 10 + }, }, { "temp_test_hwmon_event2", "temp2", - 0xA0002, + .key = { + .num = 2, + .type = 10 + }, }, };
@@ -183,11 +189,11 @@ static int do_test(size_t i, bool with_pmu, bool with_alias) strcmp(evsel->pmu->name, "hwmon_a_test_hwmon_pmu")) continue;
- if (evsel->core.attr.config != (u64)test_events[i].config) { + if (evsel->core.attr.config != (u64)test_events[i].key.type_and_num) { pr_debug("FAILED %s:%d Unexpected config for '%s', %lld != %ld\n", __FILE__, __LINE__, str, evsel->core.attr.config, - test_events[i].config); + test_events[i].key.type_and_num); ret = TEST_FAIL; goto out; } diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index 6a681e3fb552..4a9f8e090cf4 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -452,9 +452,9 @@ static int test__name_cmp(struct test_suite *test __maybe_unused, int subtest __ }
/** - * Test perf_pmu__match() that's used to search for a PMU given a name passed + * Test perf_pmu__wildcard_match() that's used to search for a PMU given a name passed * on the command line. The name that's passed may also be a filename type glob - * match. If the name does not match, perf_pmu__match() attempts to match the + * match. If the name does not match, perf_pmu__wildcard_match() attempts to match the * alias of the PMU, if provided. */ static int test__pmu_match(struct test_suite *test __maybe_unused, int subtest __maybe_unused) @@ -463,41 +463,44 @@ static int test__pmu_match(struct test_suite *test __maybe_unused, int subtest _ .name = "pmuname", };
- TEST_ASSERT_EQUAL("Exact match", perf_pmu__match(&test_pmu, "pmuname"), true); - TEST_ASSERT_EQUAL("Longer token", perf_pmu__match(&test_pmu, "longertoken"), false); - TEST_ASSERT_EQUAL("Shorter token", perf_pmu__match(&test_pmu, "pmu"), false); +#define TEST_PMU_MATCH(msg, to_match, expect) \ + TEST_ASSERT_EQUAL(msg, perf_pmu__wildcard_match(&test_pmu, to_match), expect) + + TEST_PMU_MATCH("Exact match", "pmuname", true); + TEST_PMU_MATCH("Longer token", "longertoken", false); + TEST_PMU_MATCH("Shorter token", "pmu", false);
test_pmu.name = "pmuname_10"; - TEST_ASSERT_EQUAL("Diff suffix_", perf_pmu__match(&test_pmu, "pmuname_2"), false); - TEST_ASSERT_EQUAL("Sub suffix_", perf_pmu__match(&test_pmu, "pmuname_1"), true); - TEST_ASSERT_EQUAL("Same suffix_", perf_pmu__match(&test_pmu, "pmuname_10"), true); - TEST_ASSERT_EQUAL("No suffix_", perf_pmu__match(&test_pmu, "pmuname"), true); - TEST_ASSERT_EQUAL("Underscore_", perf_pmu__match(&test_pmu, "pmuname_"), true); - TEST_ASSERT_EQUAL("Substring_", perf_pmu__match(&test_pmu, "pmuna"), false); + TEST_PMU_MATCH("Diff suffix_", "pmuname_2", false); + TEST_PMU_MATCH("Sub suffix_", "pmuname_1", true); + TEST_PMU_MATCH("Same suffix_", "pmuname_10", true); + TEST_PMU_MATCH("No suffix_", "pmuname", true); + TEST_PMU_MATCH("Underscore_", "pmuname_", true); + TEST_PMU_MATCH("Substring_", "pmuna", false);
test_pmu.name = "pmuname_ab23"; - TEST_ASSERT_EQUAL("Diff suffix hex_", perf_pmu__match(&test_pmu, "pmuname_2"), false); - TEST_ASSERT_EQUAL("Sub suffix hex_", perf_pmu__match(&test_pmu, "pmuname_ab"), true); - TEST_ASSERT_EQUAL("Same suffix hex_", perf_pmu__match(&test_pmu, "pmuname_ab23"), true); - TEST_ASSERT_EQUAL("No suffix hex_", perf_pmu__match(&test_pmu, "pmuname"), true); - TEST_ASSERT_EQUAL("Underscore hex_", perf_pmu__match(&test_pmu, "pmuname_"), true); - TEST_ASSERT_EQUAL("Substring hex_", perf_pmu__match(&test_pmu, "pmuna"), false); + TEST_PMU_MATCH("Diff suffix hex_", "pmuname_2", false); + TEST_PMU_MATCH("Sub suffix hex_", "pmuname_ab", true); + TEST_PMU_MATCH("Same suffix hex_", "pmuname_ab23", true); + TEST_PMU_MATCH("No suffix hex_", "pmuname", true); + TEST_PMU_MATCH("Underscore hex_", "pmuname_", true); + TEST_PMU_MATCH("Substring hex_", "pmuna", false);
test_pmu.name = "pmuname10"; - TEST_ASSERT_EQUAL("Diff suffix", perf_pmu__match(&test_pmu, "pmuname2"), false); - TEST_ASSERT_EQUAL("Sub suffix", perf_pmu__match(&test_pmu, "pmuname1"), true); - TEST_ASSERT_EQUAL("Same suffix", perf_pmu__match(&test_pmu, "pmuname10"), true); - TEST_ASSERT_EQUAL("No suffix", perf_pmu__match(&test_pmu, "pmuname"), true); - TEST_ASSERT_EQUAL("Underscore", perf_pmu__match(&test_pmu, "pmuname_"), false); - TEST_ASSERT_EQUAL("Substring", perf_pmu__match(&test_pmu, "pmuna"), false); + TEST_PMU_MATCH("Diff suffix", "pmuname2", false); + TEST_PMU_MATCH("Sub suffix", "pmuname1", true); + TEST_PMU_MATCH("Same suffix", "pmuname10", true); + TEST_PMU_MATCH("No suffix", "pmuname", true); + TEST_PMU_MATCH("Underscore", "pmuname_", false); + TEST_PMU_MATCH("Substring", "pmuna", false);
test_pmu.name = "pmunameab23"; - TEST_ASSERT_EQUAL("Diff suffix hex", perf_pmu__match(&test_pmu, "pmuname2"), false); - TEST_ASSERT_EQUAL("Sub suffix hex", perf_pmu__match(&test_pmu, "pmunameab"), true); - TEST_ASSERT_EQUAL("Same suffix hex", perf_pmu__match(&test_pmu, "pmunameab23"), true); - TEST_ASSERT_EQUAL("No suffix hex", perf_pmu__match(&test_pmu, "pmuname"), true); - TEST_ASSERT_EQUAL("Underscore hex", perf_pmu__match(&test_pmu, "pmuname_"), false); - TEST_ASSERT_EQUAL("Substring hex", perf_pmu__match(&test_pmu, "pmuna"), false); + TEST_PMU_MATCH("Diff suffix hex", "pmuname2", false); + TEST_PMU_MATCH("Sub suffix hex", "pmunameab", true); + TEST_PMU_MATCH("Same suffix hex", "pmunameab23", true); + TEST_PMU_MATCH("No suffix hex", "pmuname", true); + TEST_PMU_MATCH("Underscore hex", "pmuname_", false); + TEST_PMU_MATCH("Substring hex", "pmuna", false);
/* * 2 hex chars or less are not considered suffixes so it shouldn't be @@ -505,7 +508,7 @@ static int test__pmu_match(struct test_suite *test __maybe_unused, int subtest _ * false results here than above. */ test_pmu.name = "pmuname_a3"; - TEST_ASSERT_EQUAL("Diff suffix 2 hex_", perf_pmu__match(&test_pmu, "pmuname_2"), false); + TEST_PMU_MATCH("Diff suffix 2 hex_", "pmuname_2", false); /* * This one should be false, but because pmuname_a3 ends in 3 which is * decimal, it's not possible to determine if it's a short hex suffix or @@ -513,19 +516,19 @@ static int test__pmu_match(struct test_suite *test __maybe_unused, int subtest _ * length of decimal suffix. Run the test anyway and expect the wrong * result. And slightly fuzzy matching shouldn't do too much harm. */ - TEST_ASSERT_EQUAL("Sub suffix 2 hex_", perf_pmu__match(&test_pmu, "pmuname_a"), true); - TEST_ASSERT_EQUAL("Same suffix 2 hex_", perf_pmu__match(&test_pmu, "pmuname_a3"), true); - TEST_ASSERT_EQUAL("No suffix 2 hex_", perf_pmu__match(&test_pmu, "pmuname"), false); - TEST_ASSERT_EQUAL("Underscore 2 hex_", perf_pmu__match(&test_pmu, "pmuname_"), false); - TEST_ASSERT_EQUAL("Substring 2 hex_", perf_pmu__match(&test_pmu, "pmuna"), false); + TEST_PMU_MATCH("Sub suffix 2 hex_", "pmuname_a", true); + TEST_PMU_MATCH("Same suffix 2 hex_", "pmuname_a3", true); + TEST_PMU_MATCH("No suffix 2 hex_", "pmuname", false); + TEST_PMU_MATCH("Underscore 2 hex_", "pmuname_", false); + TEST_PMU_MATCH("Substring 2 hex_", "pmuna", false);
test_pmu.name = "pmuname_5"; - TEST_ASSERT_EQUAL("Glob 1", perf_pmu__match(&test_pmu, "pmu*"), true); - TEST_ASSERT_EQUAL("Glob 2", perf_pmu__match(&test_pmu, "nomatch*"), false); - TEST_ASSERT_EQUAL("Seq 1", perf_pmu__match(&test_pmu, "pmuname_[12345]"), true); - TEST_ASSERT_EQUAL("Seq 2", perf_pmu__match(&test_pmu, "pmuname_[67890]"), false); - TEST_ASSERT_EQUAL("? 1", perf_pmu__match(&test_pmu, "pmuname_?"), true); - TEST_ASSERT_EQUAL("? 2", perf_pmu__match(&test_pmu, "pmuname_1?"), false); + TEST_PMU_MATCH("Glob 1", "pmu*", true); + TEST_PMU_MATCH("Glob 2", "nomatch*", false); + TEST_PMU_MATCH("Seq 1", "pmuname_[12345]", true); + TEST_PMU_MATCH("Seq 2", "pmuname_[67890]", false); + TEST_PMU_MATCH("? 1", "pmuname_?", true); + TEST_PMU_MATCH("? 2", "pmuname_1?", false);
return TEST_OK; } diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S index 75cf084a927d..577760046772 100644 --- a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S +++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S @@ -26,3 +26,5 @@ skip: mov x0, #0 mov x8, #93 // __NR_exit syscall svc #0 + +.section .note.GNU-stack, "", @progbits diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh index 1b58ccc1fd88..4d6c3c1b7fb9 100755 --- a/tools/perf/tests/shell/record_bpf_filter.sh +++ b/tools/perf/tests/shell/record_bpf_filter.sh @@ -89,7 +89,7 @@ test_bpf_filter_fail() { test_bpf_filter_group() { echo "Group bpf-filter test"
- if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \ + if ! perf record -e task-clock --filter 'period > 1000, ip > 0' \ -o /dev/null true 2>/dev/null then echo "Group bpf-filter test [Failed should succeed]" @@ -97,7 +97,7 @@ test_bpf_filter_group() { return fi
- if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \ + if ! perf record -e task-clock --filter 'period > 1000 , cpu > 0 || ip > 0' \ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU then echo "Group bpf-filter test [Failed forbidden CPU]" diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh index 8b148b300be1..9c466c0efa85 100755 --- a/tools/perf/tests/shell/stat_all_pmu.sh +++ b/tools/perf/tests/shell/stat_all_pmu.sh @@ -2,7 +2,6 @@ # perf all PMU test (exclusive) # SPDX-License-Identifier: GPL-2.0
-set -e err=0 result=""
@@ -16,34 +15,55 @@ trap trap_cleanup EXIT TERM INT # Test all PMU events; however exclude parameterized ones (name contains '?') for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]+?[[:graph:]]+[[:space:]]//g') do - echo "Testing $p" - result=$(perf stat -e "$p" true 2>&1) - if echo "$result" | grep -q "$p" + echo -n "Testing $p -- " + output=$(perf stat -e "$p" true 2>&1) + stat_result=$? + if echo "$output" | grep -q "$p" then # Event seen in output. - continue - fi - if echo "$result" | grep -q "<not supported>" - then - # Event not supported, so ignore. - continue + if [ $stat_result -eq 0 ] && ! echo "$output" | grep -q "<not supported>" + then + # Event supported. + echo "supported" + continue + elif echo "$output" | grep -q "<not supported>" + then + # Event not supported, so ignore. + echo "not supported" + continue + elif echo "$output" | grep -q "No permission to enable" + then + # No permissions, so ignore. + echo "no permission to enable" + continue + elif echo "$output" | grep -q "Bad event name" + then + # Non-existent event. + echo "Error: Bad event name" + echo "$output" + err=1 + continue + fi fi - if echo "$result" | grep -q "Access to performance monitoring and observability operations is limited." + + if echo "$output" | grep -q "Access to performance monitoring and observability operations is limited." then # Access is limited, so ignore. + echo "access limited" continue fi
# We failed to see the event and it is supported. Possibly the workload was # too small so retry with something longer. - result=$(perf stat -e "$p" perf bench internals synthesize 2>&1) - if echo "$result" | grep -q "$p" + output=$(perf stat -e "$p" perf bench internals synthesize 2>&1) + if echo "$output" | grep -q "$p" then # Event seen in output. + echo "supported" continue fi echo "Error: event '$p' not printed in:" - echo "$result" + echo "$output" err=1 done
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh index c86da0235059..7da606db97cb 100755 --- a/tools/perf/tests/shell/test_data_symbol.sh +++ b/tools/perf/tests/shell/test_data_symbol.sh @@ -18,7 +18,7 @@ skip_if_no_mem_event() {
skip_if_no_mem_event || exit 2
-skip_test_missing_symbol buf1 +skip_test_missing_symbol workload_datasym_buf1
TEST_PROGRAM="perf test -w datasym" PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) @@ -26,18 +26,19 @@ ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX)
check_result() { # The memory report format is as below: - # 99.92% ... [.] buf1+0x38 + # 99.92% ... [.] workload_datasym_buf1+0x38 result=$(perf mem report -i ${PERF_DATA} -s symbol_daddr -q 2>&1 | - awk '/buf1/ { print $4 }') + awk '/workload_datasym_buf1/ { print $4 }')
- # Testing is failed if has no any sample for "buf1" + # Testing is failed if has no any sample for "workload_datasym_buf1" [ -z "$result" ] && return 1
while IFS= read -r line; do - # The "data1" and "data2" fields in structure "buf1" have - # offset "0x0" and "0x38", returns failure if detect any - # other offset value. - if [ "$line" != "buf1+0x0" ] && [ "$line" != "buf1+0x38" ]; then + # The "data1" and "data2" fields in structure + # "workload_datasym_buf1" have offset "0x0" and "0x38", returns + # failure if detect any other offset value. + if [ "$line" != "workload_datasym_buf1+0x0" ] && \ + [ "$line" != "workload_datasym_buf1+0x38" ]; then return 1 fi done <<< "$result" diff --git a/tools/perf/tests/tool_pmu.c b/tools/perf/tests/tool_pmu.c index 187942b749b7..1e900ef92e37 100644 --- a/tools/perf/tests/tool_pmu.c +++ b/tools/perf/tests/tool_pmu.c @@ -27,7 +27,7 @@ static int do_test(enum tool_pmu_event ev, bool with_pmu) parse_events_error__init(&err); ret = parse_events(evlist, str, &err); if (ret) { - if (tool_pmu__skip_event(tool_pmu__event_to_str(ev))) { + if (!tool_pmu__event_to_str(ev)) { ret = TEST_OK; goto out; } @@ -59,7 +59,7 @@ static int do_test(enum tool_pmu_event ev, bool with_pmu) } }
- if (!found && !tool_pmu__skip_event(tool_pmu__event_to_str(ev))) { + if (!found && tool_pmu__event_to_str(ev)) { pr_debug("FAILED %s:%d Didn't find tool event '%s' in parsed evsels\n", __FILE__, __LINE__, str); ret = TEST_FAIL; diff --git a/tools/perf/tests/workloads/datasym.c b/tools/perf/tests/workloads/datasym.c index 8e08fc75a973..1d0b7d64e1ba 100644 --- a/tools/perf/tests/workloads/datasym.c +++ b/tools/perf/tests/workloads/datasym.c @@ -1,3 +1,6 @@ +#include <stdlib.h> +#include <signal.h> +#include <unistd.h> #include <linux/compiler.h> #include "../tests.h"
@@ -7,16 +10,33 @@ typedef struct _buf { char data2; } buf __attribute__((aligned(64)));
-static buf buf1 = { +/* volatile to try to avoid the compiler seeing reserved as unused. */ +static volatile buf workload_datasym_buf1 = { /* to have this in the data section */ .reserved[0] = 1, };
-static int datasym(int argc __maybe_unused, const char **argv __maybe_unused) +static volatile sig_atomic_t done; + +static void sighandler(int sig __maybe_unused) +{ + done = 1; +} + +static int datasym(int argc, const char **argv) { - for (;;) { - buf1.data1++; - if (buf1.data1 == 123) { + int sec = 1; + + if (argc > 0) + sec = atoi(argv[0]); + + signal(SIGINT, sighandler); + signal(SIGALRM, sighandler); + alarm(sec); + + while (!done) { + workload_datasym_buf1.data1++; + if (workload_datasym_buf1.data1 == 123) { /* * Add some 'noise' in the loop to work around errata * 1694299 on Arm N1. @@ -30,9 +50,9 @@ static int datasym(int argc __maybe_unused, const char **argv __maybe_unused) * longer a continuous repeating pattern that interacts * badly with the bias. */ - buf1.data1++; + workload_datasym_buf1.data1++; } - buf1.data2 += buf1.data1; + workload_datasym_buf1.data2 += workload_datasym_buf1.data1; } return 0; } diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 12761c39788f..f1365ce69ba0 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -37,6 +37,8 @@ #include "../../arch/arm64/include/asm/cputype.h" #define MAX_TIMESTAMP (~0ULL)
+#define is_ldst_op(op) (!!((op) & ARM_SPE_OP_LDST)) + struct arm_spe { struct auxtrace auxtrace; struct auxtrace_queues queues; @@ -669,6 +671,10 @@ static u64 arm_spe__synth_data_source(struct arm_spe_queue *speq, { union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA };
+ /* Only synthesize data source for LDST operations */ + if (!is_ldst_op(record->op)) + return 0; + if (record->op & ARM_SPE_OP_LD) data_src.mem_op = PERF_MEM_OP_LOAD; else if (record->op & ARM_SPE_OP_ST) @@ -767,7 +773,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq) * When data_src is zero it means the record is not a memory operation, * skip to synthesize memory sample for this case. */ - if (spe->sample_memory && data_src) { + if (spe->sample_memory && is_ldst_op(record->op)) { err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src); if (err) return err; diff --git a/tools/perf/util/bpf-filter.l b/tools/perf/util/bpf-filter.l index f313404f95a9..6aa65ade3385 100644 --- a/tools/perf/util/bpf-filter.l +++ b/tools/perf/util/bpf-filter.l @@ -76,7 +76,7 @@ static int path_or_error(void) num_dec [0-9]+ num_hex 0[Xx][0-9a-fA-F]+ space [ \t]+ -path [^ \t\n]+ +path [^ \t\n,]+ ident [_a-zA-Z][_a-zA-Z0-9]+
%% diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c index 49b79cf0c5cc..8aa456d7c2cd 100644 --- a/tools/perf/util/comm.c +++ b/tools/perf/util/comm.c @@ -5,6 +5,8 @@ #include <internal/rc_check.h> #include <linux/refcount.h> #include <linux/zalloc.h> +#include <tools/libc_compat.h> // reallocarray + #include "rwsem.h"
DECLARE_RC_STRUCT(comm_str) { diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 995f6bb05b5f..f9ef7d045c92 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -46,8 +46,8 @@ int debug_type_profile; FILE *debug_file(void) { if (!_debug_file) { - pr_warning_once("debug_file not set"); debug_set_file(stderr); + pr_warning_once("debug_file not set"); } return _debug_file; } diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index bb8e8f444054..c0472a41147c 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -808,7 +808,9 @@ static inline bool dso__is_kcore(const struct dso *dso)
static inline bool dso__is_kallsyms(const struct dso *dso) { - return RC_CHK_ACCESS(dso)->kernel && RC_CHK_ACCESS(dso)->long_name[0] != '/'; + enum dso_binary_type bt = dso__binary_type(dso); + + return bt == DSO_BINARY_TYPE__KALLSYMS || bt == DSO_BINARY_TYPE__GUEST_KALLSYMS; }
bool dso__is_object_file(const struct dso *dso); diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index f0dd174e2deb..633df7d9204c 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1373,19 +1373,18 @@ static int evlist__create_syswide_maps(struct evlist *evlist) */ cpus = perf_cpu_map__new_online_cpus(); if (!cpus) - goto out; + return -ENOMEM;
threads = perf_thread_map__new_dummy(); - if (!threads) - goto out_put; + if (!threads) { + perf_cpu_map__put(cpus); + return -ENOMEM; + }
perf_evlist__set_maps(&evlist->core, cpus, threads); - perf_thread_map__put(threads); -out_put: perf_cpu_map__put(cpus); -out: - return -ENOMEM; + return 0; }
int evlist__open(struct evlist *evlist) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index bc144388f892..9cd78cdee628 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -511,6 +511,16 @@ struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig) } evsel->cgrp = cgroup__get(orig->cgrp); #ifdef HAVE_LIBTRACEEVENT + if (orig->tp_sys) { + evsel->tp_sys = strdup(orig->tp_sys); + if (evsel->tp_sys == NULL) + goto out_err; + } + if (orig->tp_name) { + evsel->tp_name = strdup(orig->tp_name); + if (evsel->tp_name == NULL) + goto out_err; + } evsel->tp_format = orig->tp_format; #endif evsel->handler = orig->handler; @@ -634,7 +644,11 @@ struct tep_event *evsel__tp_format(struct evsel *evsel) if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) return NULL;
- tp_format = trace_event__tp_format(evsel->tp_sys, evsel->tp_name); + if (!evsel->tp_sys) + tp_format = trace_event__tp_format_id(evsel->core.attr.config); + else + tp_format = trace_event__tp_format(evsel->tp_sys, evsel->tp_name); + if (IS_ERR(tp_format)) { int err = -PTR_ERR(evsel->tp_format);
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c index c221dcce6666..6413537442aa 100644 --- a/tools/perf/util/expr.c +++ b/tools/perf/util/expr.c @@ -215,6 +215,8 @@ int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref) int expr__get_id(struct expr_parse_ctx *ctx, const char *id, struct expr_id_data **data) { + if (!ctx || !id) + return -1; return hashmap__find(ctx->ids, id, data) ? 0 : -1; }
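The evsel__clone() hunk further above deep-copies the optional tp_sys/tp_name strings so the clone owns its own memory instead of sharing pointers with the original. A small, generic sketch of that pattern; the struct and field names here (ev, sys, name) are invented for illustration.

#include <stdlib.h>
#include <string.h>

struct ev {
	char *sys;	/* optional, may be NULL */
	char *name;	/* optional, may be NULL */
};

static int ev_clone(struct ev *dst, const struct ev *src)
{
	*dst = (struct ev){ 0 };
	if (src->sys) {
		dst->sys = strdup(src->sys);
		if (!dst->sys)
			return -1;
	}
	if (src->name) {
		dst->name = strdup(src->name);
		if (!dst->name) {
			free(dst->sys);	/* undo the partial copy */
			dst->sys = NULL;
			return -1;
		}
	}
	return 0;
}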
diff --git a/tools/perf/util/hwmon_pmu.c b/tools/perf/util/hwmon_pmu.c index 4acb9bb19b84..acd889b2462f 100644 --- a/tools/perf/util/hwmon_pmu.c +++ b/tools/perf/util/hwmon_pmu.c @@ -107,20 +107,6 @@ struct hwmon_pmu { int hwmon_dir_fd; };
-/** - * union hwmon_pmu_event_key: Key for hwmon_pmu->events as such each key - * represents an event. - * - * Related hwmon files start <type><number> that this key represents. - */ -union hwmon_pmu_event_key { - long type_and_num; - struct { - int num :16; - enum hwmon_type type :8; - }; -}; - /** * struct hwmon_pmu_event_value: Value in hwmon_pmu->events. * diff --git a/tools/perf/util/hwmon_pmu.h b/tools/perf/util/hwmon_pmu.h index 882566846df4..b3329774d2b2 100644 --- a/tools/perf/util/hwmon_pmu.h +++ b/tools/perf/util/hwmon_pmu.h @@ -91,6 +91,22 @@ enum hwmon_item { HWMON_ITEM__MAX, };
+/** + * union hwmon_pmu_event_key: Key for hwmon_pmu->events as such each key + * represents an event. + * union is exposed for testing to ensure problems are avoided on big + * endian machines. + * + * Related hwmon files start <type><number> that this key represents. + */ +union hwmon_pmu_event_key { + long type_and_num; + struct { + int num :16; + enum hwmon_type type :8; + }; +}; + bool perf_pmu__is_hwmon(const struct perf_pmu *pmu); bool evsel__is_hwmon(const struct evsel *evsel);
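The union moved into hwmon_pmu.h above packs a hwmon file number and type into a single long used as the events hashmap key; per its comment, it is exposed so tests can catch bit-field layout surprises on big-endian targets. A rough standalone illustration of the packing idea, with a plain int standing in for enum hwmon_type and invented values:

#include <stdio.h>

union event_key {
	long type_and_num;
	struct {
		int num  : 16;
		int type : 8;	/* enum hwmon_type in the real code */
	};
};

int main(void)
{
	union event_key key = { .type_and_num = 0 };

	key.num  = 12;	/* e.g. the "12" in a temp12_input file */
	key.type = 3;	/* stand-in for the temperature type */
	printf("hash key = %#lx\n", key.type_and_num);	/* layout is endian-dependent */
	return 0;
}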
diff --git a/tools/perf/util/intel-tpebs.c b/tools/perf/util/intel-tpebs.c index 50a3c3e07160..2c421b475b3b 100644 --- a/tools/perf/util/intel-tpebs.c +++ b/tools/perf/util/intel-tpebs.c @@ -254,7 +254,7 @@ int tpebs_start(struct evlist *evsel_list) new = zalloc(sizeof(*new)); if (!new) { ret = -1; - zfree(name); + zfree(&name); goto err; } new->name = name; diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 2d51badfbf2e..9c7bf17bcbe8 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1468,8 +1468,6 @@ static int machine__create_modules(struct machine *machine) if (modules__parse(modules, machine, machine__create_module)) return -1;
- maps__fixup_end(machine__kernel_maps(machine)); - if (!machine__set_modules_path(machine)) return 0;
@@ -1563,6 +1561,8 @@ int machine__create_kernel_maps(struct machine *machine) } }
+ maps__fixup_end(machine__kernel_maps(machine)); + out_put: dso__put(kernel); return ret; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 1e23faa364b1..6c36b98875bc 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1660,7 +1660,7 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */ while ((pmu = perf_pmus__scan(pmu)) != NULL) { if (!parse_events__filter_pmu(parse_state, pmu) && - perf_pmu__match(pmu, event_or_pmu)) { + perf_pmu__wildcard_match(pmu, event_or_pmu)) { bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
if (!parse_events_add_pmu(parse_state, *listp, pmu, diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index a8193ac8f2e7..72aa6167c090 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -596,7 +596,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, }; if (pmu_events_table__find_event(pmu->events_table, pmu, name, update_alias, &data) == 0) - pmu->cpu_json_aliases++; + pmu->cpu_common_json_aliases++; } pmu->sysfs_aliases++; break; @@ -847,21 +847,23 @@ static size_t pmu_deduped_name_len(const struct perf_pmu *pmu, const char *name, }
/** - * perf_pmu__match_ignoring_suffix - Does the pmu_name match tok ignoring any - * trailing suffix? The Suffix must be in form - * tok_{digits}, or tok{digits}. + * perf_pmu__match_wildcard - Does the pmu_name start with tok and is then only + * followed by nothing or a suffix? tok may contain + * part of a suffix. * @pmu_name: The pmu_name with possible suffix. - * @tok: The possible match to pmu_name without suffix. + * @tok: The wildcard argument to match. */ -static bool perf_pmu__match_ignoring_suffix(const char *pmu_name, const char *tok) +static bool perf_pmu__match_wildcard(const char *pmu_name, const char *tok) { const char *p, *suffix; bool has_hex = false; + size_t tok_len = strlen(tok);
- if (strncmp(pmu_name, tok, strlen(tok))) + /* Check start of pmu_name for equality. */ + if (strncmp(pmu_name, tok, tok_len)) return false;
- suffix = p = pmu_name + strlen(tok); + suffix = p = pmu_name + tok_len; if (*p == 0) return true;
@@ -887,60 +889,84 @@ static bool perf_pmu__match_ignoring_suffix(const char *pmu_name, const char *to }
/** - * pmu_uncore_alias_match - does name match the PMU name? - * @pmu_name: the json struct pmu_event name. This may lack a suffix (which + * perf_pmu__match_ignoring_suffix_uncore - Does the pmu_name match tok ignoring + * any trailing suffix on pmu_name and + * tok? The Suffix must be in form + * tok_{digits}, or tok{digits}. + * @pmu_name: The pmu_name with possible suffix. + * @tok: The possible match to pmu_name. + */ +static bool perf_pmu__match_ignoring_suffix_uncore(const char *pmu_name, const char *tok) +{ + size_t pmu_name_len, tok_len; + + /* For robustness, check for NULL. */ + if (pmu_name == NULL) + return tok == NULL; + + /* uncore_ prefixes are ignored. */ + if (!strncmp(pmu_name, "uncore_", 7)) + pmu_name += 7; + if (!strncmp(tok, "uncore_", 7)) + tok += 7; + + pmu_name_len = pmu_name_len_no_suffix(pmu_name); + tok_len = pmu_name_len_no_suffix(tok); + if (pmu_name_len != tok_len) + return false; + + return strncmp(pmu_name, tok, pmu_name_len) == 0; +} + + +/** + * perf_pmu__match_wildcard_uncore - does to_match match the PMU's name? + * @pmu_name: The pmu->name or pmu->alias to match against. + * @to_match: the json struct pmu_event name. This may lack a suffix (which * matches) or be of the form "socket,pmuname" which will match * "socketX_pmunameY". - * @name: a real full PMU name as from sysfs. */ -static bool pmu_uncore_alias_match(const char *pmu_name, const char *name) +static bool perf_pmu__match_wildcard_uncore(const char *pmu_name, const char *to_match) { - char *tmp = NULL, *tok, *str; - bool res; - - if (strchr(pmu_name, ',') == NULL) - return perf_pmu__match_ignoring_suffix(name, pmu_name); + char *mutable_to_match, *tok, *tmp;
- str = strdup(pmu_name); - if (!str) + if (!pmu_name) return false;
- /* - * uncore alias may be from different PMU with common prefix - */ - tok = strtok_r(str, ",", &tmp); - if (strncmp(pmu_name, tok, strlen(tok))) { - res = false; - goto out; - } + /* uncore_ prefixes are ignored. */ + if (!strncmp(pmu_name, "uncore_", 7)) + pmu_name += 7; + if (!strncmp(to_match, "uncore_", 7)) + to_match += 7;
- /* - * Match more complex aliases where the alias name is a comma-delimited - * list of tokens, orderly contained in the matching PMU name. - * - * Example: For alias "socket,pmuname" and PMU "socketX_pmunameY", we - * match "socket" in "socketX_pmunameY" and then "pmuname" in - * "pmunameY". - */ - while (1) { - char *next_tok = strtok_r(NULL, ",", &tmp); + if (strchr(to_match, ',') == NULL) + return perf_pmu__match_wildcard(pmu_name, to_match);
- name = strstr(name, tok); - if (!name || - (!next_tok && !perf_pmu__match_ignoring_suffix(name, tok))) { - res = false; - goto out; + /* Process comma separated list of PMU name components. */ + mutable_to_match = strdup(to_match); + if (!mutable_to_match) + return false; + + tok = strtok_r(mutable_to_match, ",", &tmp); + while (tok) { + size_t tok_len = strlen(tok); + + if (strncmp(pmu_name, tok, tok_len)) { + /* Mismatch between part of pmu_name and tok. */ + free(mutable_to_match); + return false; } - if (!next_tok) - break; - tok = next_tok; - name += strlen(tok); + /* Move pmu_name forward over tok and suffix. */ + pmu_name += tok_len; + while (*pmu_name != '\0' && isdigit(*pmu_name)) + pmu_name++; + if (*pmu_name == '_') + pmu_name++; + + tok = strtok_r(NULL, ",", &tmp); } - - res = true; -out: - free(str); - return res; + free(mutable_to_match); + return *pmu_name == '\0'; }
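perf_pmu__match_wildcard_uncore() above walks a comma-separated json name such as "socket,pmuname" through a sysfs name such as "socketX_pmunameY", consuming each token plus any digit suffix and a '_' separator (after first stripping an "uncore_" prefix). The following is a stripped-down standalone reimplementation of that walk for illustration, not the function above verbatim:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* True if name looks like "<tok1><digits>_<tok2><digits>..." for the
 * comma-separated tokens in to_match. */
static bool match_comma_wildcard(const char *name, const char *to_match)
{
	char buf[128], *tok, *save = NULL;

	snprintf(buf, sizeof(buf), "%s", to_match);
	for (tok = strtok_r(buf, ",", &save); tok; tok = strtok_r(NULL, ",", &save)) {
		size_t len = strlen(tok);

		if (strncmp(name, tok, len))
			return false;
		name += len;
		while (isdigit((unsigned char)*name))	/* skip the numeric suffix */
			name++;
		if (*name == '_')			/* skip the separator */
			name++;
	}
	return *name == '\0';
}

int main(void)
{
	printf("%d\n", match_comma_wildcard("socket0_pmuname1", "socket,pmuname")); /* 1 */
	printf("%d\n", match_comma_wildcard("socket0_other1", "socket,pmuname"));   /* 0 */
	return 0;
}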
bool pmu_uncore_identifier_match(const char *compat, const char *id) @@ -1003,11 +1029,19 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe, { struct perf_pmu *pmu = vdata;
- if (!pe->compat || !pe->pmu) + if (!pe->compat || !pe->pmu) { + /* No data to match. */ return 0; + } + + if (!perf_pmu__match_wildcard_uncore(pmu->name, pe->pmu) && + !perf_pmu__match_wildcard_uncore(pmu->alias_name, pe->pmu)) { + /* PMU name/alias_name don't match. */ + return 0; + }
- if (pmu_uncore_alias_match(pe->pmu, pmu->name) && - pmu_uncore_identifier_match(pe->compat, pmu->id)) { + if (pmu_uncore_identifier_match(pe->compat, pmu->id)) { + /* Id matched. */ perf_pmu__new_alias(pmu, pe->name, pe->desc, @@ -1016,7 +1050,6 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe, pe, EVENT_SRC_SYS_JSON); } - return 0; }
@@ -1851,9 +1884,10 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu) if (pmu->cpu_aliases_added) nr += pmu->cpu_json_aliases; else if (pmu->events_table) - nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases; + nr += pmu_events_table__num_events(pmu->events_table, pmu) - + pmu->cpu_common_json_aliases; else - assert(pmu->cpu_json_aliases == 0); + assert(pmu->cpu_json_aliases == 0 && pmu->cpu_common_json_aliases == 0);
if (perf_pmu__is_tool(pmu)) nr -= tool_pmu__num_skip_events(); @@ -1974,15 +2008,82 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus, return ret; }
-bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name) +static bool perf_pmu___name_match(const struct perf_pmu *pmu, const char *to_match, bool wildcard) { - return !strcmp(pmu->name, pmu_name) || - (pmu->is_uncore && pmu_uncore_alias_match(pmu_name, pmu->name)) || + const char *names[2] = { + pmu->name, + pmu->alias_name, + }; + if (pmu->is_core) { + for (size_t i = 0; i < ARRAY_SIZE(names); i++) { + const char *name = names[i]; + + if (!name) + continue; + + if (!strcmp(name, to_match)) { + /* Exact name match. */ + return true; + } + } + if (!strcmp(to_match, "default_core")) { + /* + * jevents and tests use default_core as a marker for any core + * PMU as the PMU name varies across architectures. + */ + return true; + } + return false; + } + if (!pmu->is_uncore) { /* - * jevents and tests use default_core as a marker for any core - * PMU as the PMU name varies across architectures. + * PMU isn't core or uncore, some kind of broken CPU mask + * situation. Only match exact name. */ - (pmu->is_core && !strcmp(pmu_name, "default_core")); + for (size_t i = 0; i < ARRAY_SIZE(names); i++) { + const char *name = names[i]; + + if (!name) + continue; + + if (!strcmp(name, to_match)) { + /* Exact name match. */ + return true; + } + } + return false; + } + for (size_t i = 0; i < ARRAY_SIZE(names); i++) { + const char *name = names[i]; + + if (wildcard && perf_pmu__match_wildcard_uncore(name, to_match)) + return true; + if (!wildcard && perf_pmu__match_ignoring_suffix_uncore(name, to_match)) + return true; + } + return false; +} + +/** + * perf_pmu__name_wildcard_match - Called by the jevents generated code to see + * if pmu matches the json to_match string. + * @pmu: The pmu whose name/alias to match. + * @to_match: The possible match to pmu_name. + */ +bool perf_pmu__name_wildcard_match(const struct perf_pmu *pmu, const char *to_match) +{ + return perf_pmu___name_match(pmu, to_match, /*wildcard=*/true); +} + +/** + * perf_pmu__name_no_suffix_match - Does pmu's name match to_match ignoring any + * trailing suffix on the pmu_name and/or tok? + * @pmu: The pmu whose name/alias to match. + * @to_match: The possible match to pmu_name. + */ +bool perf_pmu__name_no_suffix_match(const struct perf_pmu *pmu, const char *to_match) +{ + return perf_pmu___name_match(pmu, to_match, /*wildcard=*/false); }
bool perf_pmu__is_software(const struct perf_pmu *pmu) @@ -2229,29 +2330,31 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, name ?: "N/A", buf, config_name, config); }
-bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok) +bool perf_pmu__wildcard_match(const struct perf_pmu *pmu, const char *wildcard_to_match) { - const char *name = pmu->name; - bool need_fnmatch = strisglob(tok); + const char *names[2] = { + pmu->name, + pmu->alias_name, + }; + bool need_fnmatch = strisglob(wildcard_to_match);
- if (!strncmp(tok, "uncore_", 7)) - tok += 7; - if (!strncmp(name, "uncore_", 7)) - name += 7; + if (!strncmp(wildcard_to_match, "uncore_", 7)) + wildcard_to_match += 7;
- if (perf_pmu__match_ignoring_suffix(name, tok) || - (need_fnmatch && !fnmatch(tok, name, 0))) - return true; + for (size_t i = 0; i < ARRAY_SIZE(names); i++) { + const char *pmu_name = names[i];
- name = pmu->alias_name; - if (!name) - return false; + if (!pmu_name) + continue;
- if (!strncmp(name, "uncore_", 7)) - name += 7; + if (!strncmp(pmu_name, "uncore_", 7)) + pmu_name += 7;
- return perf_pmu__match_ignoring_suffix(name, tok) || - (need_fnmatch && !fnmatch(tok, name, 0)); + if (perf_pmu__match_wildcard(pmu_name, wildcard_to_match) || + (need_fnmatch && !fnmatch(wildcard_to_match, pmu_name, 0))) + return true; + } + return false; }
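The rewritten perf_pmu__wildcard_match() above checks both pmu->name and pmu->alias_name, strips a leading "uncore_", and falls back to fnmatch() when the user-supplied token contains glob characters. A small hedged sketch of the fnmatch() part in isolation, with example strings that are not taken from the patch:

#include <fnmatch.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *pmu_name = "uncore_imc_0";
	const char *tok = "imc*";	/* a glob as it might come from the command line */

	/* Mirror the "uncore_" stripping done in the real matcher. */
	if (!strncmp(pmu_name, "uncore_", 7))
		pmu_name += 7;

	if (fnmatch(tok, pmu_name, 0) == 0)
		printf("'%s' matches glob '%s'\n", pmu_name, tok);
	return 0;
}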
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size) diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index dbed6c243a5e..b93014cc3670 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -37,6 +37,8 @@ struct perf_pmu_caps { };
enum { + PERF_PMU_TYPE_PE_START = 0, + PERF_PMU_TYPE_PE_END = 0xFFFEFFFF, PERF_PMU_TYPE_HWMON_START = 0xFFFF0000, PERF_PMU_TYPE_HWMON_END = 0xFFFFFFFD, PERF_PMU_TYPE_TOOL = 0xFFFFFFFE, @@ -134,6 +136,11 @@ struct perf_pmu { uint32_t cpu_json_aliases; /** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */ uint32_t sys_json_aliases; + /** + * @cpu_common_json_aliases: Number of json events that overlapped with sysfs when + * loading all sysfs events. + */ + uint32_t cpu_common_json_aliases; /** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */ bool sysfs_aliases_loaded; /** @@ -238,7 +245,8 @@ bool perf_pmu__have_event(struct perf_pmu *pmu, const char *name); size_t perf_pmu__num_events(struct perf_pmu *pmu); int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus, void *state, pmu_event_callback cb); -bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name); +bool perf_pmu__name_wildcard_match(const struct perf_pmu *pmu, const char *to_match); +bool perf_pmu__name_no_suffix_match(const struct perf_pmu *pmu, const char *to_match);
/** * perf_pmu_is_software - is the PMU a software PMU as in it uses the @@ -273,7 +281,7 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, const char *config_name); void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
-bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok); +bool perf_pmu__wildcard_match(const struct perf_pmu *pmu, const char *wildcard_to_match);
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size); int perf_pmu__pathname_scnprintf(char *buf, size_t size, diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c index b493da0d22ef..7959af59908c 100644 --- a/tools/perf/util/pmus.c +++ b/tools/perf/util/pmus.c @@ -37,10 +37,25 @@ */ static LIST_HEAD(core_pmus); static LIST_HEAD(other_pmus); -static bool read_sysfs_core_pmus; -static bool read_sysfs_all_pmus; +enum perf_tool_pmu_type { + PERF_TOOL_PMU_TYPE_PE_CORE, + PERF_TOOL_PMU_TYPE_PE_OTHER, + PERF_TOOL_PMU_TYPE_TOOL, + PERF_TOOL_PMU_TYPE_HWMON, + +#define PERF_TOOL_PMU_TYPE_PE_CORE_MASK (1 << PERF_TOOL_PMU_TYPE_PE_CORE) +#define PERF_TOOL_PMU_TYPE_PE_OTHER_MASK (1 << PERF_TOOL_PMU_TYPE_PE_OTHER) +#define PERF_TOOL_PMU_TYPE_TOOL_MASK (1 << PERF_TOOL_PMU_TYPE_TOOL) +#define PERF_TOOL_PMU_TYPE_HWMON_MASK (1 << PERF_TOOL_PMU_TYPE_HWMON) + +#define PERF_TOOL_PMU_TYPE_ALL_MASK (PERF_TOOL_PMU_TYPE_PE_CORE_MASK | \ + PERF_TOOL_PMU_TYPE_PE_OTHER_MASK | \ + PERF_TOOL_PMU_TYPE_TOOL_MASK | \ + PERF_TOOL_PMU_TYPE_HWMON_MASK) +}; +static unsigned int read_pmu_types;
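The two read_sysfs_* booleans are replaced above by a read_pmu_types bitmask built from the new enum, so callers can request exactly the PMU categories they need and later code can test what has already been loaded. A tiny sketch of the set/test idiom, with illustrative category names rather than the real ones:

#include <stdio.h>

enum { CAT_CORE, CAT_OTHER, CAT_TOOL, CAT_HWMON };
#define MASK(cat)	(1u << (cat))
#define ALL_MASK	(MASK(CAT_CORE) | MASK(CAT_OTHER) | MASK(CAT_TOOL) | MASK(CAT_HWMON))

int main(void)
{
	unsigned int read_types = 0;
	unsigned int want = MASK(CAT_CORE) | MASK(CAT_HWMON);

	if ((read_types & want) == want)
		puts("already loaded, nothing to do");
	read_types |= want;	/* record what has now been read */
	printf("all loaded? %s\n", read_types == ALL_MASK ? "yes" : "no");
	return 0;
}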
-static void pmu_read_sysfs(bool core_only); +static void pmu_read_sysfs(unsigned int to_read_pmus);
size_t pmu_name_len_no_suffix(const char *str) { @@ -102,8 +117,7 @@ void perf_pmus__destroy(void)
perf_pmu__delete(pmu); } - read_sysfs_core_pmus = false; - read_sysfs_all_pmus = false; + read_pmu_types = 0; }
static struct perf_pmu *pmu_find(const char *name) @@ -129,6 +143,7 @@ struct perf_pmu *perf_pmus__find(const char *name) struct perf_pmu *pmu; int dirfd; bool core_pmu; + unsigned int to_read_pmus = 0;
/* * Once PMU is loaded it stays in the list, @@ -139,11 +154,11 @@ struct perf_pmu *perf_pmus__find(const char *name) if (pmu) return pmu;
- if (read_sysfs_all_pmus) + if (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK) return NULL;
core_pmu = is_pmu_core(name); - if (core_pmu && read_sysfs_core_pmus) + if (core_pmu && (read_pmu_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK)) return NULL;
dirfd = perf_pmu__event_source_devices_fd(); @@ -151,15 +166,27 @@ struct perf_pmu *perf_pmus__find(const char *name) /*eager_load=*/false); close(dirfd);
- if (!pmu) { - /* - * Looking up an inidividual PMU failed. This may mean name is - * an alias, so read the PMUs from sysfs and try to find again. - */ - pmu_read_sysfs(core_pmu); + if (pmu) + return pmu; + + /* Looking up an individual perf event PMU failed, check if a tool PMU should be read. */ + if (!strncmp(name, "hwmon_", 6)) + to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK; + else if (!strcmp(name, "tool")) + to_read_pmus |= PERF_TOOL_PMU_TYPE_TOOL_MASK; + + if (to_read_pmus) { + pmu_read_sysfs(to_read_pmus); pmu = pmu_find(name); + if (pmu) + return pmu; } - return pmu; + /* Read all necessary PMUs from sysfs and see if the PMU is found. */ + to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK; + if (!core_pmu) + to_read_pmus |= PERF_TOOL_PMU_TYPE_PE_OTHER_MASK; + pmu_read_sysfs(to_read_pmus); + return pmu_find(name); }
static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name) @@ -176,11 +203,11 @@ static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name) if (pmu) return pmu;
- if (read_sysfs_all_pmus) + if (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK) return NULL;
core_pmu = is_pmu_core(name); - if (core_pmu && read_sysfs_core_pmus) + if (core_pmu && (read_pmu_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK)) return NULL;
return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name, @@ -197,52 +224,61 @@ static int pmus_cmp(void *priv __maybe_unused, }
/* Add all pmus in sysfs to pmu list: */ -static void pmu_read_sysfs(bool core_only) +static void pmu_read_sysfs(unsigned int to_read_types) { - int fd; - DIR *dir; - struct dirent *dent; struct perf_pmu *tool_pmu;
- if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus)) + if ((read_pmu_types & to_read_types) == to_read_types) { + /* All requested PMU types have been read. */ return; + }
- fd = perf_pmu__event_source_devices_fd(); - if (fd < 0) - return; + if (to_read_types & (PERF_TOOL_PMU_TYPE_PE_CORE_MASK | PERF_TOOL_PMU_TYPE_PE_OTHER_MASK)) { + int fd = perf_pmu__event_source_devices_fd(); + DIR *dir; + struct dirent *dent; + bool core_only = (to_read_types & PERF_TOOL_PMU_TYPE_PE_OTHER_MASK) == 0;
- dir = fdopendir(fd); - if (!dir) { - close(fd); - return; - } + if (fd < 0) + goto skip_pe_pmus;
- while ((dent = readdir(dir))) { - if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) - continue; - if (core_only && !is_pmu_core(dent->d_name)) - continue; - /* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */ - perf_pmu__find2(fd, dent->d_name); - } + dir = fdopendir(fd); + if (!dir) { + close(fd); + goto skip_pe_pmus; + }
- closedir(dir); - if (list_empty(&core_pmus)) { + while ((dent = readdir(dir))) { + if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) + continue; + if (core_only && !is_pmu_core(dent->d_name)) + continue; + /* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */ + perf_pmu__find2(fd, dent->d_name); + } + + closedir(dir); + } +skip_pe_pmus: + if ((to_read_types & PERF_TOOL_PMU_TYPE_PE_CORE_MASK) && list_empty(&core_pmus)) { if (!perf_pmu__create_placeholder_core_pmu(&core_pmus)) pr_err("Failure to set up any core PMUs\n"); } list_sort(NULL, &core_pmus, pmus_cmp); - if (!core_only) { - tool_pmu = perf_pmus__tool_pmu(); - list_add_tail(&tool_pmu->list, &other_pmus); - perf_pmus__read_hwmon_pmus(&other_pmus); + + if ((to_read_types & PERF_TOOL_PMU_TYPE_TOOL_MASK) != 0 && + (read_pmu_types & PERF_TOOL_PMU_TYPE_TOOL_MASK) == 0) { + tool_pmu = tool_pmu__new(); + if (tool_pmu) + list_add_tail(&tool_pmu->list, &other_pmus); } + if ((to_read_types & PERF_TOOL_PMU_TYPE_HWMON_MASK) != 0 && + (read_pmu_types & PERF_TOOL_PMU_TYPE_HWMON_MASK) == 0) + perf_pmus__read_hwmon_pmus(&other_pmus); + list_sort(NULL, &other_pmus, pmus_cmp); - if (!list_empty(&core_pmus)) { - read_sysfs_core_pmus = true; - if (!core_only) - read_sysfs_all_pmus = true; - } + + read_pmu_types |= to_read_types; }
static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type) @@ -263,12 +299,21 @@ static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
struct perf_pmu *perf_pmus__find_by_type(unsigned int type) { + unsigned int to_read_pmus; struct perf_pmu *pmu = __perf_pmus__find_by_type(type);
- if (pmu || read_sysfs_all_pmus) + if (pmu || (read_pmu_types == PERF_TOOL_PMU_TYPE_ALL_MASK)) return pmu;
- pmu_read_sysfs(/*core_only=*/false); + if (type >= PERF_PMU_TYPE_PE_START && type <= PERF_PMU_TYPE_PE_END) { + to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK | + PERF_TOOL_PMU_TYPE_PE_OTHER_MASK; + } else if (type >= PERF_PMU_TYPE_HWMON_START && type <= PERF_PMU_TYPE_HWMON_END) { + to_read_pmus = PERF_TOOL_PMU_TYPE_HWMON_MASK; + } else { + to_read_pmus = PERF_TOOL_PMU_TYPE_TOOL_MASK; + } + pmu_read_sysfs(to_read_pmus); pmu = __perf_pmus__find_by_type(type); return pmu; } @@ -282,7 +327,7 @@ struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu) bool use_core_pmus = !pmu || pmu->is_core;
if (!pmu) { - pmu_read_sysfs(/*core_only=*/false); + pmu_read_sysfs(PERF_TOOL_PMU_TYPE_ALL_MASK); pmu = list_prepare_entry(pmu, &core_pmus, list); } if (use_core_pmus) { @@ -300,7 +345,7 @@ struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu) struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu) { if (!pmu) { - pmu_read_sysfs(/*core_only=*/true); + pmu_read_sysfs(PERF_TOOL_PMU_TYPE_PE_CORE_MASK); return list_first_entry_or_null(&core_pmus, typeof(*pmu), list); } list_for_each_entry_continue(pmu, &core_pmus, list) @@ -316,7 +361,7 @@ static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu) const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";
if (!pmu) { - pmu_read_sysfs(/*core_only=*/false); + pmu_read_sysfs(PERF_TOOL_PMU_TYPE_ALL_MASK); pmu = list_prepare_entry(pmu, &core_pmus, list); } else last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: ""); @@ -710,11 +755,25 @@ char *perf_pmus__default_pmu_name(void) struct perf_pmu *evsel__find_pmu(const struct evsel *evsel) { struct perf_pmu *pmu = evsel->pmu; + bool legacy_core_type;
- if (!pmu) { - pmu = perf_pmus__find_by_type(evsel->core.attr.type); - ((struct evsel *)evsel)->pmu = pmu; + if (pmu) + return pmu; + + pmu = perf_pmus__find_by_type(evsel->core.attr.type); + legacy_core_type = + evsel->core.attr.type == PERF_TYPE_HARDWARE || + evsel->core.attr.type == PERF_TYPE_HW_CACHE; + if (!pmu && legacy_core_type) { + if (perf_pmus__supports_extended_type()) { + u32 type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT; + + pmu = perf_pmus__find_by_type(type); + } else { + pmu = perf_pmus__find_core_pmu(); + } } + ((struct evsel *)evsel)->pmu = pmu; return pmu; }
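The evsel__find_pmu() fallback above relies on the extended-type convention for legacy PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events: on kernels that support it, the owning PMU's type id is carried in the upper 32 bits of attr.config (PERF_PMU_TYPE_SHIFT in the uapi header). A small sketch of packing and unpacking that encoding; the PMU type value 8 is just an example:

#include <linux/perf_event.h>
#include <stdio.h>

int main(void)
{
	unsigned long long config = PERF_COUNT_HW_INSTRUCTIONS;
	unsigned long long pmu_type = 8;	/* hypothetical sysfs "type" of a core PMU */

	config |= pmu_type << 32;	/* PERF_PMU_TYPE_SHIFT */

	printf("pmu type = %llu, hw event id = %llu\n",
	       config >> 32, config & 0xffffffffULL);
	return 0;
}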
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index b4bc57859f73..a23fa5d95394 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -47,7 +47,7 @@ struct pyrf_event { };
#define sample_members \ - sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \ + sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \ sample_member_def(sample_pid, pid, T_INT, "event pid"), \ sample_member_def(sample_tid, tid, T_INT, "event tid"), \ sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ @@ -481,6 +481,11 @@ static PyObject *pyrf_event__new(const union perf_event *event) event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) return NULL;
+ // FIXME this better be dynamic or we need to parse everything + // before calling perf_mmap__consume(), including tracepoint fields. + if (sizeof(pevent->event) < event->header.size) + return NULL; + ptype = pyrf_event__type[event->header.type]; pevent = PyObject_New(struct pyrf_event, ptype); if (pevent != NULL) @@ -984,20 +989,22 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
evsel = evlist__event2evsel(evlist, event); if (!evsel) { + Py_DECREF(pyevent); Py_INCREF(Py_None); return Py_None; }
pevent->evsel = evsel;
- err = evsel__parse_sample(evsel, event, &pevent->sample); - - /* Consume the even only after we parsed it out. */ perf_mmap__consume(&md->core);
- if (err) + err = evsel__parse_sample(evsel, &pevent->event, &pevent->sample); + if (err) { + Py_DECREF(pyevent); return PyErr_Format(PyExc_OSError, "perf: can't parse sample, err=%d", err); + } + return pyevent; } end: diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index fa8b2a1048ff..d83bda5824d2 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -151,6 +151,7 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type { struct evsel *cur; int evsel_ctx = evsel_context(evsel); + struct perf_pmu *evsel_pmu = evsel__find_pmu(evsel);
evlist__for_each_entry(evsel->evlist, cur) { struct perf_stat_aggr *aggr; @@ -177,7 +178,7 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type * Except the SW CLOCK events, * ignore if not the PMU we're looking for. */ - if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu)) + if ((type != STAT_NSECS) && (evsel_pmu != evsel__find_pmu(cur))) continue;
aggr = &cur->stats->aggr[aggr_idx]; diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 7c2ccdcc3fdb..1f7abd8754c7 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -535,7 +535,10 @@ static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
return 0; } -/* events should have the same name, scale, unit, cgroup but on different PMUs */ +/* + * Events should have the same name, scale, unit, cgroup but on different core + * PMUs or on different but matching uncore PMUs. + */ static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b) { if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b))) @@ -553,7 +556,13 @@ static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b) if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b)) return false;
- return evsel_a->pmu != evsel_b->pmu; + if (evsel_a->pmu == evsel_b->pmu || evsel_a->pmu == NULL || evsel_b->pmu == NULL) + return false; + + if (evsel_a->pmu->is_core) + return evsel_b->pmu->is_core; + + return perf_pmu__name_no_suffix_match(evsel_a->pmu, evsel_b->pmu->name); }
static void evsel__merge_aliases(struct evsel *evsel) diff --git a/tools/perf/util/tool_pmu.c b/tools/perf/util/tool_pmu.c index 4fb097578479..d43d6cf6e4a2 100644 --- a/tools/perf/util/tool_pmu.c +++ b/tools/perf/util/tool_pmu.c @@ -62,7 +62,8 @@ int tool_pmu__num_skip_events(void)
const char *tool_pmu__event_to_str(enum tool_pmu_event ev) { - if (ev > TOOL_PMU__EVENT_NONE && ev < TOOL_PMU__EVENT_MAX) + if ((ev > TOOL_PMU__EVENT_NONE && ev < TOOL_PMU__EVENT_MAX) && + !tool_pmu__skip_event(tool_pmu__event_names[ev])) return tool_pmu__event_names[ev];
return NULL; @@ -489,17 +490,24 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread) return 0; }
-struct perf_pmu *perf_pmus__tool_pmu(void) +struct perf_pmu *tool_pmu__new(void) { - static struct perf_pmu tool = { - .name = "tool", - .type = PERF_PMU_TYPE_TOOL, - .aliases = LIST_HEAD_INIT(tool.aliases), - .caps = LIST_HEAD_INIT(tool.caps), - .format = LIST_HEAD_INIT(tool.format), - }; - if (!tool.events_table) - tool.events_table = find_core_events_table("common", "common"); - - return &tool; + struct perf_pmu *tool = zalloc(sizeof(struct perf_pmu)); + + if (!tool) + goto out; + tool->name = strdup("tool"); + if (!tool->name) { + zfree(&tool); + goto out; + } + + tool->type = PERF_PMU_TYPE_TOOL; + INIT_LIST_HEAD(&tool->aliases); + INIT_LIST_HEAD(&tool->caps); + INIT_LIST_HEAD(&tool->format); + tool->events_table = find_core_events_table("common", "common"); + +out: + return tool; } diff --git a/tools/perf/util/tool_pmu.h b/tools/perf/util/tool_pmu.h index a60184859080..c6ad1dd90a56 100644 --- a/tools/perf/util/tool_pmu.h +++ b/tools/perf/util/tool_pmu.h @@ -51,6 +51,6 @@ int evsel__tool_pmu_open(struct evsel *evsel, int start_cpu_map_idx, int end_cpu_map_idx); int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread);
-struct perf_pmu *perf_pmus__tool_pmu(void); +struct perf_pmu *tool_pmu__new(void);
#endif /* __TOOL_PMU_H */ diff --git a/tools/perf/util/units.c b/tools/perf/util/units.c index 32c39cfe209b..4c6a86e1cb54 100644 --- a/tools/perf/util/units.c +++ b/tools/perf/util/units.c @@ -64,7 +64,7 @@ unsigned long convert_unit(unsigned long value, char *unit)
int unit_number__scnprintf(char *buf, size_t size, u64 n) { - char unit[4] = "BKMG"; + char unit[] = "BKMG"; int i = 0;
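The units.c change just above is subtle: char unit[4] = "BKMG" is legal C but drops the terminating NUL (the literal needs five bytes), while char unit[] = "BKMG" lets the compiler size the array to include it. A small illustration of the difference; the variable names are made up:

#include <stdio.h>

int main(void)
{
	char fixed[4] = "BKMG";	/* 4 bytes, no terminating NUL */
	char sized[]  = "BKMG";	/* 5 bytes, NUL included */

	printf("sizeof fixed = %zu, sizeof sized = %zu\n",
	       sizeof(fixed), sizeof(sized));
	/* Printing `fixed` with %s would read past the end of the array. */
	return 0;
}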
while (((n / 1024) > 1) && (i < 3)) { diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index 99bf905ade81..e4f9f93c123a 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -168,6 +168,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics .PP \fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. .PP +\fBCoreThr\fP Core Thermal Throttling events during the measurement interval. Note that events since boot can be found in /sys/devices/system/cpu/cpu*/thermal_throttle/* +.PP \fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms or /sys/class/drm/card0/gt/gt0/rc6_residency_ms or /sys/class/drm/card0/device/tile0/gtN/gtidle/idle_residency_ms depending on the graphics driver being used. .PP \fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz or /sys/class/drm/card0/gt_cur_freq_mhz or /sys/class/drm/card0/gt/gt0/rps_cur_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/cur_freq depending on the graphics driver being used. diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 8d5011a0bf60..4155d9bfcfc6 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -2211,7 +2211,7 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) return 0; }
-int probe_msr(int cpu, off_t offset) +int probe_rapl_msr(int cpu, off_t offset, int index) { ssize_t retval; unsigned long long value; @@ -2220,13 +2220,22 @@ int probe_msr(int cpu, off_t offset)
retval = pread(get_msr_fd(cpu), &value, sizeof(value), offset);
- /* - * Expect MSRs to accumulate some non-zero value since the system was powered on. - * Treat zero as a read failure. - */ - if (retval != sizeof(value) || value == 0) + /* if the read failed, the probe fails */ + if (retval != sizeof(value)) return 1;
+ /* If an Energy Status Counter MSR returns 0, the probe fails */ + switch (index) { + case RAPL_RCI_INDEX_ENERGY_PKG: + case RAPL_RCI_INDEX_ENERGY_CORES: + case RAPL_RCI_INDEX_DRAM: + case RAPL_RCI_INDEX_GFX: + case RAPL_RCI_INDEX_ENERGY_PLATFORM: + if (value == 0) + return 1; + } + + /* PKG,DRAM_PERF_STATUS MSRs, can return any value */ return 0; }
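probe_rapl_msr() above reads the candidate MSR through the msr driver and, only for the energy-status indexes, treats a zero reading as "counter not present", since status registers such as PKG_PERF_STATUS can legitimately read zero. A rough sketch of the underlying read, assuming the msr module is loaded and using MSR_PKG_ENERGY_STATUS (0x611) as an example offset:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_PKG_ENERGY_STATUS 0x611	/* example RAPL energy counter */

int main(void)
{
	uint64_t value = 0;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);	/* needs root and the msr module */

	if (fd < 0)
		return 1;
	if (pread(fd, &value, sizeof(value), MSR_PKG_ENERGY_STATUS) != sizeof(value)) {
		close(fd);
		return 1;	/* read failed: the probe fails */
	}
	close(fd);
	/* For energy counters, a zero reading is also treated as "not present". */
	printf("raw energy status: %#llx%s\n",
	       (unsigned long long)value, value == 0 ? " (probe would fail)" : "");
	return 0;
}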
@@ -3476,7 +3485,7 @@ void delta_core(struct core_data *new, struct core_data *old) old->c6 = new->c6 - old->c6; old->c7 = new->c7 - old->c7; old->core_temp_c = new->core_temp_c; - old->core_throt_cnt = new->core_throt_cnt; + old->core_throt_cnt = new->core_throt_cnt - old->core_throt_cnt; old->mc6_us = new->mc6_us - old->mc6_us;
DELTA_WRAP32(new->core_energy.raw_value, old->core_energy.raw_value); @@ -6030,6 +6039,7 @@ int snapshot_graphics(int idx) int retval;
rewind(gfx_info[idx].fp); + fflush(gfx_info[idx].fp);
switch (idx) { case GFX_rc6: @@ -7896,7 +7906,7 @@ void rapl_perf_init(void) rci->flags[cai->rci_index] = cai->flags;
/* Use MSR for this counter */ - } else if (!no_msr && cai->msr && probe_msr(cpu, cai->msr) == 0) { + } else if (!no_msr && cai->msr && probe_rapl_msr(cpu, cai->msr, cai->rci_index) == 0) { rci->source[cai->rci_index] = COUNTER_SOURCE_MSR; rci->msr[cai->rci_index] = cai->msr; rci->msr_mask[cai->rci_index] = cai->msr_mask; @@ -8034,7 +8044,7 @@ void msr_perf_init_(void) cai->present = true;
/* User MSR for this counter */ - } else if (!no_msr && cai->msr && probe_msr(cpu, cai->msr) == 0) { + } else if (!no_msr && cai->msr && probe_rapl_msr(cpu, cai->msr, cai->rci_index) == 0) { cci->source[cai->rci_index] = COUNTER_SOURCE_MSR; cci->msr[cai->rci_index] = cai->msr; cci->msr_mask[cai->rci_index] = cai->msr_mask; @@ -8148,7 +8158,7 @@ void cstate_perf_init_(bool soft_c1)
/* User MSR for this counter */ } else if (!no_msr && cai->msr && pkg_cstate_limit >= cai->pkg_cstate_limit - && probe_msr(cpu, cai->msr) == 0) { + && probe_rapl_msr(cpu, cai->msr, cai->rci_index) == 0) { cci->source[cai->rci_index] = COUNTER_SOURCE_MSR; cci->msr[cai->rci_index] = cai->msr; } diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 87551628e112..6722080b2107 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -306,6 +306,7 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf/ \ BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) \ + BPF_TARGET_ENDIAN=$(BPF_TARGET_ENDIAN) \ EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \ EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' && \ cp $(RUNQSLOWER_OUTPUT)runqslower $@ diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index cc184e4420f6..67557cda2208 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -6,6 +6,10 @@ #include <test_progs.h> #include "bloom_filter_map.skel.h"
+#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + static void test_fail_cases(void) { LIBBPF_OPTS(bpf_map_create_opts, opts); @@ -69,6 +73,7 @@ static void test_success_cases(void)
/* Create a map */ opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE; + opts.numa_node = NUMA_NO_NODE; fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 544144620ca6..66a900327f91 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -1600,6 +1600,7 @@ static void test_tailcall_bpf2bpf_freplace(void) goto out;
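The bloom_filter_map.c fix above sets numa_node explicitly once BPF_F_NUMA_NODE is in map_flags, using NUMA_NO_NODE to leave node selection to the kernel. A hedged sketch of the same map creation through libbpf; whether a given kernel accepts NUMA_NO_NODE with this flag is exactly what the fix is about, so treat this as illustrative rather than guaranteed to succeed everywhere:

#include <unistd.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE (-1)
#endif

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .map_flags = BPF_F_NUMA_NODE,
		    .numa_node = NUMA_NO_NODE);
	int fd;

	/* Bloom filter maps take no key; 8-byte values, 100 entries as in the test. */
	fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(long long), 100, &opts);
	if (fd < 0)
		return 1;
	close(fd);
	return 0;
}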
err = bpf_link__destroy(freplace_link); + freplace_link = NULL; if (!ASSERT_OK(err, "destroy link")) goto out;
diff --git a/tools/testing/selftests/bpf/progs/strncmp_bench.c b/tools/testing/selftests/bpf/progs/strncmp_bench.c index 18373a7df76e..f47bf88f8d2a 100644 --- a/tools/testing/selftests/bpf/progs/strncmp_bench.c +++ b/tools/testing/selftests/bpf/progs/strncmp_bench.c @@ -35,7 +35,10 @@ static __always_inline int local_strncmp(const char *s1, unsigned int sz, SEC("tp/syscalls/sys_enter_getpgid") int strncmp_no_helper(void *ctx) { - if (local_strncmp(str, cmp_str_len + 1, target) < 0) + const char *target_str = target; + + barrier_var(target_str); + if (local_strncmp(str, cmp_str_len + 1, target_str) < 0) __sync_add_and_fetch(&hits, 1); return 0; } diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index 9446673645eb..f0cb14ea8608 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -876,7 +876,7 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize) mremap_size = thpsize / 2; mremap_mem = mmap(NULL, mremap_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mem == MAP_FAILED) { + if (mremap_mem == MAP_FAILED) { ksft_test_result_fail("mmap() failed\n"); goto munmap; } diff --git a/tools/testing/selftests/pcie_bwctrl/Makefile b/tools/testing/selftests/pcie_bwctrl/Makefile index 3e84e26341d1..48ec048f47af 100644 --- a/tools/testing/selftests/pcie_bwctrl/Makefile +++ b/tools/testing/selftests/pcie_bwctrl/Makefile @@ -1,2 +1,2 @@ -TEST_PROGS = set_pcie_cooling_state.sh +TEST_PROGS = set_pcie_cooling_state.sh set_pcie_speed.sh include ../lib.mk diff --git a/tools/verification/rv/Makefile.rv b/tools/verification/rv/Makefile.rv index 161baa29eb86..2497fb96c83d 100644 --- a/tools/verification/rv/Makefile.rv +++ b/tools/verification/rv/Makefile.rv @@ -27,7 +27,7 @@ endif
INCLUDE := -Iinclude/ CFLAGS := -g -DVERSION="$(VERSION)" $(FOPTS) $(WOPTS) $(EXTRA_CFLAGS) $(INCLUDE) -LDFLAGS := -ggdb $(EXTRA_LDFLAGS) +LDFLAGS := -ggdb $(LDFLAGS) $(EXTRA_LDFLAGS)
INSTALL := install MKDIR := mkdir