I'm announcing the release of the 6.1.157 kernel.
All users of the 6.1 kernel series must upgrade.
The updated 6.1.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.1.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
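For anyone who wants to inspect or build this release locally, a minimal fetch sequence (assuming no existing clone of the stable tree) would be something like:

	git clone git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
	cd linux-stable
	git checkout linux-6.1.y        # follow the 6.1 stable branch
	# or check out this exact release, tagged v6.1.157:
	git checkout v6.1.157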
thanks,
greg k-h
------------
 Documentation/admin-guide/kernel-parameters.txt | 3
 Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml | 15
 Makefile | 2
 arch/arm/mach-omap2/pm33xx-core.c | 6
 arch/arm64/boot/dts/qcom/msm8916.dtsi | 2
 arch/arm64/boot/dts/qcom/sdm845.dtsi | 4
 arch/arm64/boot/dts/ti/k3-am62a-main.dtsi | 2
 arch/loongarch/kernel/setup.c | 5
 arch/parisc/include/uapi/asm/ioctls.h | 8
 arch/parisc/lib/memcpy.c | 1
 arch/powerpc/platforms/powernv/pci-ioda.c | 2
 arch/powerpc/platforms/pseries/msi.c | 2
 arch/sparc/kernel/of_device_32.c | 1
 arch/sparc/kernel/of_device_64.c | 1
 arch/sparc/mm/hugetlbpage.c | 20
 arch/x86/kernel/umip.c | 15
 arch/x86/kvm/emulate.c | 11
 arch/x86/kvm/kvm_emulate.h | 2
 arch/x86/kvm/x86.c | 9
 arch/xtensa/platforms/iss/simdisk.c | 6
 block/blk-crypto-fallback.c | 3
 crypto/essiv.c | 14
 drivers/acpi/acpi_dbg.c | 26
 drivers/acpi/acpi_tad.c | 3
 drivers/acpi/acpica/evglock.c | 4
 drivers/acpi/property.c | 137 ++-
 drivers/bus/mhi/host/init.c | 5
 drivers/char/ipmi/ipmi_msghandler.c | 416 ++++------
 drivers/char/tpm/tpm_tis_core.c | 4
 drivers/clk/at91/clk-peripheral.c | 7
 drivers/clk/mediatek/clk-mt8195-infra_ao.c | 2
 drivers/clk/mediatek/clk-mux.c | 4
 drivers/clk/nxp/clk-lpc18xx-cgu.c | 20
 drivers/clk/tegra/clk-bpmp.c | 2
 drivers/clocksource/clps711x-timer.c | 23
 drivers/cpufreq/intel_pstate.c | 8
 drivers/cpufreq/tegra186-cpufreq.c | 8
 drivers/cpuidle/governors/menu.c | 21
 drivers/crypto/aspeed/aspeed-hace-crypto.c | 2
 drivers/crypto/atmel-tdes.c | 2
 drivers/firmware/meson/meson_sm.c | 7
 drivers/gpio/gpio-wcd934x.c | 3
 drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 21
 drivers/gpu/drm/amd/display/dc/dce/dce_transform.h | 4
 drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h | 7
 drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h | 2
 drivers/gpu/drm/nouveau/nouveau_bo.c | 2
 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 6
 drivers/iio/adc/xilinx-ams.c | 47 -
 drivers/iio/dac/ad5360.c | 2
 drivers/iio/dac/ad5421.c | 2
 drivers/iio/frequency/adf4350.c | 20
 drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 4
 drivers/iommu/intel/iommu.c | 2
 drivers/mailbox/zynqmp-ipi-mailbox.c | 7
 drivers/media/i2c/mt9v111.c | 2
 drivers/media/mc/mc-devnode.c | 6
 drivers/media/mc/mc-entity.c | 2
 drivers/media/pci/cx18/cx18-queue.c | 13
 drivers/media/pci/ivtv/ivtv-irq.c | 2
 drivers/media/pci/ivtv/ivtv-yuv.c | 8
 drivers/media/rc/lirc_dev.c | 9
 drivers/memory/samsung/exynos-srom.c | 10
 drivers/mfd/intel_soc_pmic_chtdc_ti.c | 5
 drivers/mmc/core/sdio.c | 6
 drivers/mtd/nand/raw/fsmc_nand.c | 6
 drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
 drivers/net/wireless/ath/ath11k/core.c | 6
 drivers/net/wireless/ath/ath11k/hal.c | 16
 drivers/net/wireless/ath/ath11k/hal.h | 1
 drivers/nvme/host/pci.c | 2
 drivers/pci/controller/dwc/pci-keystone.c | 4
 drivers/pci/controller/dwc/pcie-tegra194.c | 22
 drivers/pci/controller/pci-tegra.c | 27
 drivers/pci/controller/pcie-rcar-host.c | 40
 drivers/pci/endpoint/functions/pci-epf-test.c | 19
 drivers/pci/iov.c | 5
 drivers/pci/pci-driver.c | 1
 drivers/pci/pci-sysfs.c | 20
 drivers/pci/pcie/aer.c | 12
 drivers/pci/pcie/err.c | 8
 drivers/power/supply/max77976_charger.c | 12
 drivers/pwm/pwm-berlin.c | 4
 drivers/rtc/interface.c | 27
 drivers/rtc/rtc-optee.c | 1
 drivers/rtc/rtc-x1205.c | 2
 drivers/scsi/hpsa.c | 21
 drivers/scsi/mvsas/mv_defs.h | 1
 drivers/scsi/mvsas/mv_init.c | 13
 drivers/scsi/mvsas/mv_sas.c | 42 -
 drivers/scsi/mvsas/mv_sas.h | 8
 drivers/spi/spi-cadence-quadspi.c | 5
 drivers/xen/events/events_base.c | 20
 drivers/xen/manage.c | 3
 fs/btrfs/export.c | 8
 fs/btrfs/extent_io.c | 14
 fs/cramfs/inode.c | 11
 fs/ext4/fsmap.c | 14
 fs/ext4/inode.c | 10
 fs/ext4/orphan.c | 17
 fs/ext4/xattr.c | 15
 fs/file.c | 5
 fs/fs-writeback.c | 32
 fs/fsopen.c | 70 -
 fs/minix/inode.c | 8
 fs/namei.c | 8
 fs/namespace.c | 11
 fs/nfsd/lockd.c | 15
 fs/nfsd/nfs4proc.c | 2
 fs/ntfs3/bitmap.c | 1
 fs/smb/server/ksmbd_netlink.h | 5
 fs/smb/server/server.h | 1
 fs/smb/server/transport_ipc.c | 3
 fs/smb/server/transport_tcp.c | 27
 fs/squashfs/inode.c | 24
 include/acpi/acpixf.h | 6
 include/asm-generic/io.h | 126 +--
 include/linux/iio/frequency/adf4350.h | 2
 include/linux/sched.h | 11
 include/media/v4l2-subdev.h | 30
 include/scsi/libsas.h | 18
 include/trace/events/rwmmio.h | 43 -
 init/main.c | 12
 kernel/bpf/inode.c | 4
 kernel/fork.c | 2
 kernel/pid.c | 2
 kernel/rseq.c | 10
 kernel/sched/deadline.c | 73 +
 kernel/sys.c | 22
 kernel/trace/trace_kprobe.c | 11
 kernel/trace/trace_probe.h | 9
 kernel/trace/trace_uprobe.c | 13
 lib/crypto/Makefile | 4
 lib/genalloc.c | 5
 lib/trace_readwrite.c | 16
 mm/hugetlb.c | 3
 mm/page_alloc.c | 2
 net/bridge/br_vlan.c | 2
 net/core/filter.c | 2
 net/ipv4/tcp_input.c | 1
 net/mptcp/pm.c | 7
 net/mptcp/pm_netlink.c | 55 +
 net/mptcp/protocol.h | 8
 net/sctp/sm_make_chunk.c | 3
 net/sctp/sm_statefuns.c | 6
 security/keys/trusted-keys/trusted_tpm1.c | 7
 sound/soc/codecs/wcd934x.c | 30
 tools/build/feature/Makefile | 4
 tools/lib/perf/include/perf/event.h | 1
 tools/perf/tests/perf-record.c | 4
 tools/perf/util/arm-spe-decoder/arm-spe-decoder.c | 30
 tools/perf/util/arm-spe-decoder/arm-spe-decoder.h | 65 +
 tools/perf/util/arm-spe.c | 42 -
 tools/perf/util/evsel.c | 2
 tools/perf/util/lzma.c | 2
 tools/perf/util/session.c | 2
 tools/perf/util/zlib.c | 2
 tools/testing/selftests/net/mptcp/mptcp_join.sh | 11
 tools/testing/selftests/rseq/rseq.c | 8
 tools/testing/selftests/vm/madv_populate.c | 9
 tools/testing/selftests/vm/soft-dirty.c | 5
 tools/testing/selftests/vm/vm_util.c | 77 +
 tools/testing/selftests/vm/vm_util.h | 2
 164 files changed, 1637 insertions(+), 854 deletions(-)
Aaron Kling (1): cpufreq: tegra186: Set target frequency for all cpus in policy
Adam Xue (1): bus: mhi: host: Do not use uninitialized 'dev' pointer in mhi_init_irq_setup()
Ahmet Eray Karadag (1): ext4: guard against EA inode refcount underflow in xattr update
Aleksa Sarai (1): fscontext: do not consume log entries when returning -EMSGSIZE
Alex Deucher (1): drm/amdgpu: Add additional DCE6 SCL registers
Alexandr Sapozhnikov (1): net/sctp: fix a null dereference in sctp_disposition sctp_sf_do_5_1D_ce()
Alok Tiwari (1): clk: nxp: Fix pll0 rate check condition in LPC18xx CGU driver
Amir Mohammad Jahangirzad (1): ACPI: debug: fix signedness issues in read/write helpers
Anderson Nascimento (1): btrfs: avoid potential out-of-bounds in btrfs_encode_fh()
Andy Shevchenko (2):
      gpio: wcd934x: Remove duplicate assignment of of_gpio_n_cells
      mfd: intel_soc_pmic_chtdc_ti: Drop unneeded assignment for cache_type
AngeloGioacchino Del Regno (1): clk: mediatek: mt8195-infra_ao: Fix parent for infra_ao_hdmi_26m
Anthony Yznaga (1): sparc64: fix hugetlb for sun4u
Askar Safin (1): openat2: don't trigger automounts with RESOLVE_NO_XDEV
Bartosz Golaszewski (1): gpio: wcd934x: mark the GPIO controller as sleeping
Brian Masney (2):
      clk: at91: peripheral: fix return value
      clk: nxp: lpc18xx-cgu: convert from round_rate() to determine_rate()
Brian Norris (1): PCI/sysfs: Ensure devices are powered for config reads
Chen-Yu Tsai (1): clk: mediatek: clk-mux: Do not pass flags to clk_mux_determine_rate_flags()
Clément Le Goffic (1): rtc: optee: fix memory leak on driver removal
Corey Minyard (1): ipmi: Rework user message limit handling
Dan Carpenter (1): net/mlx4: prevent potential use after free in mlx4_en_do_uc_filter()
Daniel Borkmann (1): bpf: Fix metadata_dst leak __bpf_redirect_neigh_v{4,6}
Daniel Tang (1): ACPI: TAD: Add missing sysfs_remove_group() for ACPI_TAD_RT
Duoming Zhou (1): scsi: mvsas: Fix use-after-free bugs in mvs_work_queue
Dzmitry Sankouski (1): power: supply: max77976_charger: fix constant current reporting
Edward Adam Davis (1): media: mc: Clear minor number before put device
Eric Biggers (2):
      KEYS: trusted_tpm1: Compare HMAC values in constant time
      sctp: Fix MAC comparison to be constant-time
Eric Woudstra (1): bridge: br_vlan_fill_forward_path_pvid: use br_vlan_group_rcu()
Erick Karanja (1): net: fsl_pq_mdio: Fix device node reference leak in fsl_pq_mdio_probe
Esben Haabendal (2):
      rtc: interface: Ensure alarm irq is enabled when UIE is enabled
      rtc: interface: Fix long-standing race when setting alarm
Fedor Pchelkin (1): clk: tegra: do not overallocate memory for bpmp clocks
Georg Gottleuber (1): nvme-pci: Add TUXEDO IBS Gen8 to Samsung sleep quirk
German Gomez (1): perf arm-spe: Refactor arm-spe to support operation packet type
Greg Kroah-Hartman (1): Linux 6.1.157
Guenter Roeck (1): ipmi: Fix handling of messages with provided receive message pointer
Gunnar Kudrjavets (1): tpm_tis: Fix incorrect arguments in tpm_tis_probe_irq_single
Hans de Goede (2):
      mfd: intel_soc_pmic_chtdc_ti: Fix invalid regmap-config max_register value
      mfd: intel_soc_pmic_chtdc_ti: Set use_single_read regmap_config flag
Haoxiang Li (1): fs/ntfs3: Fix a resource leak bug in wnd_extend()
Harini T (2):
      mailbox: zynqmp-ipi: Remove redundant mbox_controller_unregister() call
      mailbox: zynqmp-ipi: Remove dev.parent check in zynqmp_ipi_free_mboxes
Harshit Agarwal (1): sched/deadline: Fix race in push_dl_task()
Herbert Xu (1): crypto: essiv - Check ssize for decryption and in-place encryption
Huacai Chen (3):
      LoongArch: Init acpi_gbl_use_global_lock to false
      init: handle bootloader identifier in kernel parameters
      ACPICA: Allow to skip Global Lock initialization
Ian Forbes (2):
      drm/vmwgfx: Fix Use-after-free in validation
      drm/vmwgfx: Fix copy-paste typo in validation
Ian Rogers (3):
      perf evsel: Avoid container_of on a NULL leader
      libperf event: Ensure tracing data is multiple of 8 sized
      perf test: Don't leak workload gopipe in PERF_RECORD_*
Jan Kara (4):
      ext4: verify orphan file size is not too big
      ext4: free orphan info with kvfree
      writeback: Avoid softlockup when switching many inodes
      writeback: Avoid excessively long inode switching times
Jason Andryuk (2):
      xen/events: Cleanup find_virq() return codes
      xen/events: Update virq_to_irq on migration
Jisheng Zhang (1): pwm: berlin: Fix wrong register in suspend/resume
Johan Hovold (2):
      firmware: meson_sm: fix device leak at probe
      lib/genalloc: fix device leak in of_gen_pool_get()
John David Anglin (1): parisc: Remove spurious if statement from raw_copy_from_user()
John Garry (3):
      scsi: libsas: Add sas_task_find_rq()
      scsi: mvsas: Delete mvs_tag_init()
      scsi: mvsas: Use sas_task_find_rq() for tagging
KaFai Wan (1): bpf: Avoid RCU context warning when unpinning htab with internal structs
Krzysztof Kozlowski (1): ASoC: codecs: wcd934x: Simplify with dev_err_probe
Kuniyuki Iwashima (1): tcp: Don't call reqsk_fastopen_remove() in tcp_conn_request().
Lance Yang (1): selftests/mm: skip soft-dirty tests when CONFIG_MEM_SOFT_DIRTY is disabled
Laurent Pinchart (1): media: mc: Fix MUST_CONNECT handling for pads with no links
Leo Yan (5):
      perf arm_spe: Correct setting remote access
      perf arm-spe: Rename the common data source encoding
      perf arm_spe: Correct memory level for remote access
      perf session: Fix handling when buffer exceeds 2 GiB
      tools build: Align warning options with perf
Li RongQing (1): mm/hugetlb: early exit from hugetlb_pages_alloc_boot() when max_huge_pages=0
Lichen Liu (1): fs: Add 'initramfs_options' to set initramfs mount options
Linus Walleij (1): mtd: rawnand: fsmc: Default to autodetect buswidth
Lu Baolu (1): iommu/vt-d: PRS isn't usable if PDS isn't supported
Lukas Wunner (3):
      xen/manage: Fix suspend error path
      PCI/ERR: Fix uevent on failure to recover
      PCI/AER: Support errors introduced by PCIe r6.0
Ma Ke (3):
      media: lirc: Fix error handling in lirc_register()
      sparc: fix error handling in scan_one_device()
      ASoC: wcd934x: fix error handling in wcd934x_codec_parse_data()
Marek Vasut (3):
      PCI: tegra: Convert struct tegra_msi mask_lock into raw spinlock
      PCI: rcar-host: Drop PMSR spinlock
      PCI: rcar-host: Convert struct rcar_msi mask_lock into raw spinlock
Matthieu Baerts (NGI0) (2):
      selftests: mptcp: join: validate C-flag + def limit
      mptcp: pm: in-kernel: usable client side with C-flag
Miaoqian Lin (2):
      ARM: OMAP2+: pm33xx-core: Fix device node reference leaks in amx3_idle_init
      xtensa: simdisk: add input size check in proc_write_simdisk
Michael Hennerich (2):
      iio: frequency: adf4350: Fix ADF4350_REG3_12BIT_CLKDIV_MODE
      iio: frequency: adf4350: Fix prescaler usage.
Michael Riesch (1): dt-bindings: phy: rockchip-inno-csi-dphy: make power-domains non-required
Muhammad Usama Anjum (1): wifi: ath11k: HAL SRNG: don't deinitialize and re-initialize again
Nam Cao (2):
      powerpc/powernv/pci: Fix underflow and leak issue
      powerpc/pseries/msi: Fix potential underflow and leak issue
Namjae Jeon (1): ksmbd: add max ip connections parameter
Nathan Chancellor (1): lib/crypto/curve25519-hacl64: Disable KASAN with clang-17 and older
Niklas Cassel (1): PCI: tegra194: Fix broken tegra_pcie_ep_raise_msi_irq()
Niklas Schnelle (2):
      PCI/IOV: Add PCI rescan-remove locking when enabling/disabling SR-IOV
      PCI/AER: Fix missing uevent on recovery when a reset is requested
Ojaswin Mujoo (1): ext4: correctly handle queries for metadata mappings
Oleg Nesterov (1): kernel/sys.c: fix the racy usage of task_lock(tsk->group_leader) in sys_prlimit64() paths
Olga Kornievskaia (1): nfsd: nfserr_jukebox in nlm_fopen should lead to a retry
Phillip Lougher (2):
      Squashfs: add additional inode sanity checking
      Squashfs: reject negative file sizes in squashfs_read_inode()
Pratyush Yadav (2):
      spi: cadence-quadspi: Flush posted register writes before INDAC access
      spi: cadence-quadspi: Flush posted register writes before DAC access
Qianfeng Rong (3):
      media: i2c: mt9v111: fix incorrect type for ret
      iio: dac: ad5360: use int type to store negative error codes
      iio: dac: ad5421: use int type to store negative error codes
Qu Wenruo (1): btrfs: fix the incorrect max_bytes value for find_lock_delalloc_range()
Rafael J. Wysocki (6):
      ACPI: property: Fix buffer properties extraction for subnodes
      cpuidle: governors: menu: Avoid using invalid recent intervals data
      cpufreq: intel_pstate: Fix object lifecycle issue in update_qos_request()
      ACPI: property: Disregard references in data-only subnode lists
      ACPI: property: Add code comments explaining what is going on
      ACPI: property: Do not pass NULL handles to acpi_attach_data()
Rex Chen (1): mmc: core: SPI mode remove cmd7
Rob Herring (Arm) (1): rtc: x1205: Fix Xicor X1205 vendor prefix
Sai Prakash Ranjan (1): asm-generic/io: Add _RET_IP_ to MMIO trace for more accurate debug info
Sam James (1): parisc: don't reference obsolete termio struct for TC* constants
Sean Anderson (2):
      iio: xilinx-ams: Fix AMS_ALARM_THR_DIRECT_MASK
      iio: xilinx-ams: Unmask interrupts after updating alarms
Sean Christopherson (4):
      rseq/selftests: Use weak symbol reference, not definition, to link with glibc
      x86/umip: Check that the instruction opcode is at least two bytes
      x86/umip: Fix decoding of register forms of 0F 01 (SGDT and SIDT aliases)
      KVM: x86: Don't (re)check L1 intercepts when completing userspace I/O
Sean Nyekjaer (1): iio: imu: inv_icm42600: Drop redundant pm_runtime reinitialization in resume
Shin'ichiro Kawasaki (1): PCI: endpoint: pci-epf-test: Add NULL check for DMA channels before release
Shuhao Fu (1): drm/nouveau: fix bad ret code in nouveau_bo_move_prep
Siddharth Vadapalli (1): PCI: keystone: Use devm_request_irq() to free "ks-pcie-error-irq" on exit
Simon Schuster (1): copy_sighand: Handle architectures where sizeof(unsigned long) < sizeof(u64)
Stephan Gerhold (2):
      arm64: dts: qcom: msm8916: Add missing MDSS reset
      arm64: dts: qcom: sdm845: Fix slimbam num-channels/ees
Tetsuo Handa (2):
      minixfs: Verify inode mode when loading from disk
      cramfs: Verify inode mode when loading from disk
Thadeu Lima de Souza Cascardo (1): mm/page_alloc: only set ALLOC_HIGHATOMIC for __GPF_HIGH allocations
Thomas Fourier (4):
      media: cx18: Add missing check after DMA map
      media: pci: ivtv: Add missing check after DMA map
      crypto: aspeed - Fix dma_unmap_sg() direction
      crypto: atmel - Fix dma_unmap_sg() direction
Thomas Gleixner (1): rseq: Protect event mask against membarrier IPI
Thomas Weißschuh (1): fs: always return zero on success from replace_fd()
Thorsten Blum (2):
      scsi: hpsa: Fix potential memory leak in hpsa_big_passthru_ioctl()
      NFSD: Fix destination buffer size in nfsd4_ssc_setup_dul()
Tiezhu Yang (1): LoongArch: Remove CONFIG_ACPI_TABLE_UPGRADE in platform_init()
Timur Kristóf (3):
      drm/amd/display: Add missing DCE6 SCL_HORZ_FILTER_INIT* SRIs
      drm/amd/display: Properly clear SCL_*_FILTER_CONTROL on DCE6
      drm/amd/display: Properly disable scaling on DCE6
Tomi Valkeinen (1): media: v4l2-subdev: Fix alloc failure check in v4l2_subdev_call_state_try()
Varad Gautam (1): asm-generic/io.h: Skip trace helpers if rwmmio events are disabled
Vibhore Vardhan (1): arm64: dts: ti: k3-am62a-main: Fix main padcfg length
Vidya Sagar (1): PCI: tegra194: Handle errors in BPMP response
Vladimir Oltean (1): asm-generic/io.h: suppress endianness warnings for relaxed accessors
Wang Jiang (1): PCI: endpoint: Remove surplus return statement from pci_epf_test_clean_dma_chan()
Yongjian Sun (1): ext4: increase i_disksize to offset + len in ext4_update_disksize_before_punch()
Yu Kuai (1): blk-crypto: fix missing blktrace bio split events
Yuan Chen (1): tracing: Fix race condition in kprobe initialization causing NULL pointer dereference
Yunseong Kim (1): perf util: Fix compression checks returning -1 as bool
Zhen Ni (2):
      clocksource/drivers/clps711x: Fix resource leaks in error paths
      memory: samsung: exynos-srom: Fix of_iomap leak in exynos_srom_probe
gaoxiang17 (1): pid: Add a judgment for ns null in pid_nr_ns
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index cce273172739..05ab068c1cc6 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5446,6 +5446,9 @@
rootflags= [KNL] Set root filesystem mount option string
+ initramfs_options= [KNL] + Specify mount options for for the initramfs mount. + rootfstype= [KNL] Set root filesystem type
rootwait [KNL] Wait (indefinitely) for root device to show up. diff --git a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml index 0e6505e9da50..adf852717237 100644 --- a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml +++ b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml @@ -57,11 +57,24 @@ required: - clocks - clock-names - '#phy-cells' - - power-domains - resets - reset-names - rockchip,grf
+allOf: + - if: + properties: + compatible: + contains: + enum: + - rockchip,px30-csi-dphy + - rockchip,rk1808-csi-dphy + - rockchip,rk3326-csi-dphy + - rockchip,rk3368-csi-dphy + then: + required: + - power-domains + additionalProperties: false
examples: diff --git a/Makefile b/Makefile index 1e36a4ea74e1..8e4848eaaed4 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 1 -SUBLEVEL = 156 +SUBLEVEL = 157 EXTRAVERSION = NAME = Curry Ramen
diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c index bf0d25fd2cea..58b98a17746c 100644 --- a/arch/arm/mach-omap2/pm33xx-core.c +++ b/arch/arm/mach-omap2/pm33xx-core.c @@ -393,12 +393,15 @@ static int __init amx3_idle_init(struct device_node *cpu_node, int cpu) if (!state_node) break;
- if (!of_device_is_available(state_node)) + if (!of_device_is_available(state_node)) { + of_node_put(state_node); continue; + }
if (i == CPUIDLE_STATE_MAX) { pr_warn("%s: cpuidle states reached max possible\n", __func__); + of_node_put(state_node); break; }
@@ -408,6 +411,7 @@ static int __init amx3_idle_init(struct device_node *cpu_node, int cpu) states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 | WFI_FLAG_FLUSH_CACHE;
+ of_node_put(state_node); state_count++; }
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 571ed1abdad4..14d2110574bb 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -981,6 +981,8 @@ mdss: mdss@1a00000 {
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&gcc GCC_MDSS_BCR>; + interrupt-controller; #interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index b77f65a612a1..015239ed42a7 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -5134,11 +5134,11 @@ slimbam: dma-controller@17184000 { compatible = "qcom,bam-v1.7.0"; qcom,controlled-remotely; reg = <0 0x17184000 0 0x2a000>; - num-channels = <31>; + num-channels = <23>; interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; qcom,ee = <1>; - qcom,num-ees = <2>; + qcom,num-ees = <4>; iommus = <&apps_smmu 0x1806 0x0>; };
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi index 4b349d73da21..cabfe4b27230 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi @@ -97,7 +97,7 @@ k3_reset: reset-controller {
main_pmx0: pinctrl@f4000 { compatible = "pinctrl-single"; - reg = <0x00 0xf4000 0x00 0x2ac>; + reg = <0x00 0xf4000 0x00 0x25c>; #pinctrl-cells = <1>; pinctrl-single,register-width = <32>; pinctrl-single,function-mask = <0xffffffff>; diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index b00e885d9845..e2d294aebab2 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -252,10 +252,9 @@ void __init platform_init(void) arch_reserve_vmcore(); arch_parse_crashkernel();
-#ifdef CONFIG_ACPI_TABLE_UPGRADE - acpi_table_upgrade(); -#endif #ifdef CONFIG_ACPI + acpi_table_upgrade(); + acpi_gbl_use_global_lock = false; acpi_gbl_use_default_register_widths = false; acpi_boot_table_init(); #endif diff --git a/arch/parisc/include/uapi/asm/ioctls.h b/arch/parisc/include/uapi/asm/ioctls.h index 82d1148c6379..74b4027a4e80 100644 --- a/arch/parisc/include/uapi/asm/ioctls.h +++ b/arch/parisc/include/uapi/asm/ioctls.h @@ -10,10 +10,10 @@ #define TCSETS _IOW('T', 17, struct termios) /* TCSETATTR */ #define TCSETSW _IOW('T', 18, struct termios) /* TCSETATTRD */ #define TCSETSF _IOW('T', 19, struct termios) /* TCSETATTRF */ -#define TCGETA _IOR('T', 1, struct termio) -#define TCSETA _IOW('T', 2, struct termio) -#define TCSETAW _IOW('T', 3, struct termio) -#define TCSETAF _IOW('T', 4, struct termio) +#define TCGETA 0x40125401 +#define TCSETA 0x80125402 +#define TCSETAW 0x80125403 +#define TCSETAF 0x80125404 #define TCSBRK _IO('T', 5) #define TCXONC _IO('T', 6) #define TCFLSH _IO('T', 7) diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index 69d65ffab312..03165c82dfdb 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -41,7 +41,6 @@ unsigned long raw_copy_from_user(void *dst, const void __user *src, mtsp(get_kernel_space(), SR_TEMP2);
/* Check region is user accessible */ - if (start) while (start < end) { if (!prober_user(SR_TEMP1, start)) { newlen = (start - (unsigned long) src); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 5c144c05cbfd..30f8ecb9896c 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2234,7 +2234,7 @@ static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, return 0;
out: - irq_domain_free_irqs_parent(domain, virq, i - 1); + irq_domain_free_irqs_parent(domain, virq, i); msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs); return ret; } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index e10ac4887823..4d60708d32ce 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -590,7 +590,7 @@ static int pseries_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
out: /* TODO: handle RTAS cleanup in ->msi_finish() ? */ - irq_domain_free_irqs_parent(domain, virq, i - 1); + irq_domain_free_irqs_parent(domain, virq, i); return ret; }
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 9ac6853b34c1..1259f4b1ae9a 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -387,6 +387,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (of_device_register(op)) { printk("%pOF: Could not register of device.\n", dp); + put_device(&op->dev); kfree(op); op = NULL; } diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index a8ccd7260fe7..b20b21dfda69 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -680,6 +680,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (of_device_register(op)) { printk("%pOF: Could not register of device.\n", dp); + put_device(&op->dev); kfree(op); op = NULL; } diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index d8e0e3c7038d..0c9711aad5aa 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -133,6 +133,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift) { + unsigned long hugepage_size = _PAGE_SZ4MB_4U; + + pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4U; + + switch (shift) { + case HPAGE_256MB_SHIFT: + hugepage_size = _PAGE_SZ256MB_4U; + pte_val(entry) |= _PAGE_PMD_HUGE; + break; + case HPAGE_SHIFT: + pte_val(entry) |= _PAGE_PMD_HUGE; + break; + case HPAGE_64K_SHIFT: + hugepage_size = _PAGE_SZ64K_4U; + break; + default: + WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift); + } + + pte_val(entry) = pte_val(entry) | hugepage_size; return entry; }
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index 5a4b21389b1d..d432f3824f0c 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -156,15 +156,26 @@ static int identify_insn(struct insn *insn) if (!insn->modrm.nbytes) return -EINVAL;
- /* All the instructions of interest start with 0x0f. */ - if (insn->opcode.bytes[0] != 0xf) + /* The instructions of interest have 2-byte opcodes: 0F 00 or 0F 01. */ + if (insn->opcode.nbytes < 2 || insn->opcode.bytes[0] != 0xf) return -EINVAL;
if (insn->opcode.bytes[1] == 0x1) { switch (X86_MODRM_REG(insn->modrm.value)) { case 0: + /* The reg form of 0F 01 /0 encodes VMX instructions. */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) + return -EINVAL; + return UMIP_INST_SGDT; case 1: + /* + * The reg form of 0F 01 /1 encodes MONITOR/MWAIT, + * STAC/CLAC, and ENCLS. + */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) + return -EINVAL; + return UMIP_INST_SIDT; case 4: return UMIP_INST_SMSW; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 4a43261d25a2..0cb97e570f63 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -5478,12 +5478,11 @@ void init_decode_cache(struct x86_emulate_ctxt *ctxt) ctxt->mem_read.end = 0; }
-int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) +int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; - unsigned emul_flags;
ctxt->mem_read.pos = 0;
@@ -5497,8 +5496,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) rc = emulate_ud(ctxt); goto done; } - - emul_flags = ctxt->ops->get_hflags(ctxt); if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || @@ -5532,7 +5529,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) fetch_possible_mmx_operand(&ctxt->dst); }
- if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { + if (unlikely(check_intercepts) && ctxt->intercept) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5561,7 +5558,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; }
- if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(check_intercepts) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5615,7 +5612,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
special_insn:
- if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(check_intercepts) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 89246446d6aa..034cafba2cc6 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -517,7 +517,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt); #define EMULATION_RESTART 1 #define EMULATION_INTERCEPTED 2 void init_decode_cache(struct x86_emulate_ctxt *ctxt); -int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); +int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts); int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 11ca05d830e7..b349e1fbf9db 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8976,7 +8976,14 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, ctxt->exception.address = 0; }
- r = x86_emulate_insn(ctxt); + /* + * Check L1's instruction intercepts when emulating instructions for + * L2, unless KVM is re-emulating a previously decoded instruction, + * e.g. to complete userspace I/O, in which case KVM has already + * checked the intercepts. + */ + r = x86_emulate_insn(ctxt, is_guest_mode(vcpu) && + !(emulation_type & EMULTYPE_NO_DECODE));
if (r == EMULATION_INTERCEPTED) return 1; diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index f50caaa1c249..adc969395fb0 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -230,10 +230,14 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf, static ssize_t proc_write_simdisk(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - char *tmp = memdup_user_nul(buf, count); + char *tmp; struct simdisk *dev = pde_data(file_inode(file)); int err;
+ if (count == 0 || count > PAGE_SIZE) + return -EINVAL; + + tmp = memdup_user_nul(buf, count); if (IS_ERR(tmp)) return PTR_ERR(tmp);
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index e6468eab2681..7f1a9dba4049 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/random.h> #include <linux/scatterlist.h> +#include <trace/events/block.h>
#include "blk-cgroup.h" #include "blk-crypto-internal.h" @@ -229,7 +230,9 @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr) bio->bi_status = BLK_STS_RESOURCE; return false; } + bio_chain(split_bio, bio); + trace_block_split(split_bio, bio->bi_iter.bi_sector); submit_bio_noacct(bio); *bio_ptr = split_bio; } diff --git a/crypto/essiv.c b/crypto/essiv.c index 307eba74b901..9aa8603fcf63 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -186,9 +186,14 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) const struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm); struct essiv_aead_request_ctx *rctx = aead_request_ctx(req); struct aead_request *subreq = &rctx->aead_req; + int ivsize = crypto_aead_ivsize(tfm); + int ssize = req->assoclen - ivsize; struct scatterlist *src = req->src; int err;
+ if (ssize < 0) + return -EINVAL; + crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
/* @@ -198,19 +203,12 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) */ rctx->assoc = NULL; if (req->src == req->dst || !enc) { - scatterwalk_map_and_copy(req->iv, req->dst, - req->assoclen - crypto_aead_ivsize(tfm), - crypto_aead_ivsize(tfm), 1); + scatterwalk_map_and_copy(req->iv, req->dst, ssize, ivsize, 1); } else { u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset; - int ivsize = crypto_aead_ivsize(tfm); - int ssize = req->assoclen - ivsize; struct scatterlist *sg; int nents;
- if (ssize < 0) - return -EINVAL; - nents = sg_nents_for_len(req->src, ssize); if (nents < 0) return -EINVAL; diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index d50261d05f3a..515b20d0b698 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file) return 0; }
-static int acpi_aml_read_user(char __user *buf, int len) +static ssize_t acpi_aml_read_user(char __user *buf, size_t len) { - int ret; struct circ_buf *crc = &acpi_aml_io.out_crc; - int n; + ssize_t ret; + size_t n; char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER); @@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len) /* sync head before removing logs */ smp_rmb(); p = &crc->buf[crc->tail]; - n = min(len, circ_count_to_end(crc)); + n = min_t(size_t, len, circ_count_to_end(crc)); if (copy_to_user(buf, p, n)) { ret = -EFAULT; goto out; @@ -599,8 +599,8 @@ static int acpi_aml_read_user(char __user *buf, int len) static ssize_t acpi_aml_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - int ret = 0; - int size = 0; + ssize_t ret = 0; + ssize_t size = 0;
if (!count) return 0; @@ -639,11 +639,11 @@ static ssize_t acpi_aml_read(struct file *file, char __user *buf, return size > 0 ? size : ret; }
-static int acpi_aml_write_user(const char __user *buf, int len) +static ssize_t acpi_aml_write_user(const char __user *buf, size_t len) { - int ret; struct circ_buf *crc = &acpi_aml_io.in_crc; - int n; + ssize_t ret; + size_t n; char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER); @@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len) /* sync tail before inserting cmds */ smp_mb(); p = &crc->buf[crc->head]; - n = min(len, circ_space_to_end(crc)); + n = min_t(size_t, len, circ_space_to_end(crc)); if (copy_from_user(p, buf, n)) { ret = -EFAULT; goto out; @@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len) ret = n; out: acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0); - return n; + return ret; }
static ssize_t acpi_aml_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - int ret = 0; - int size = 0; + ssize_t ret = 0; + ssize_t size = 0;
if (!count) return 0; diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c index e9b8e8305e23..8383d6329c64 100644 --- a/drivers/acpi/acpi_tad.c +++ b/drivers/acpi/acpi_tad.c @@ -563,6 +563,9 @@ static int acpi_tad_remove(struct platform_device *pdev)
pm_runtime_get_sync(dev);
+ if (dd->capabilities & ACPI_TAD_RT) + sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group); + if (dd->capabilities & ACPI_TAD_DC_WAKE) sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 9aab54797ded..503646808319 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void) return_ACPI_STATUS(AE_OK); }
+ if (!acpi_gbl_use_global_lock) { + return_ACPI_STATUS(AE_OK); + } + /* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index f8a56aae97a5..46003b642b59 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -74,6 +74,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, struct fwnode_handle *parent) { struct acpi_data_node *dn; + acpi_handle scope = NULL; bool result;
dn = kzalloc(sizeof(*dn), GFP_KERNEL); @@ -86,29 +87,35 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, INIT_LIST_HEAD(&dn->data.properties); INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(handle, desc, &dn->data); - - if (handle) { - acpi_handle scope; - acpi_status status; + /* + * The scope for the completion of relative pathname segments and + * subnode object lookup is the one of the namespace node (device) + * containing the object that has returned the package. That is, it's + * the scope of that object's parent device. + */ + if (handle) + acpi_get_parent(handle, &scope);
- /* - * The scope for the subnode object lookup is the one of the - * namespace node (device) containing the object that has - * returned the package. That is, it's the scope of that - * object's parent. - */ - status = acpi_get_parent(handle, &scope); - if (ACPI_SUCCESS(status) - && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, - &dn->fwnode)) - result = true; - } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data, - &dn->fwnode)) { + /* + * Extract properties from the _DSD-equivalent package pointed to by + * desc and use scope (if not NULL) for the completion of relative + * pathname segments. + * + * The extracted properties will be held in the new data node dn. + */ + result = acpi_extract_properties(scope, desc, &dn->data); + /* + * Look for subnodes in the _DSD-equivalent package pointed to by desc + * and create child nodes of dn if there are any. + */ + if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode)) result = true; - }
if (result) { + /* + * This will be NULL if the desc package is embedded in an outer + * _DSD-equivalent package and its scope cannot be determined. + */ dn->handle = handle; dn->data.pointer = desc; list_add_tail(&dn->sibling, list); @@ -120,35 +127,21 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc, return false; }
-static bool acpi_nondev_subnode_data_ok(acpi_handle handle, - const union acpi_object *link, - struct list_head *list, - struct fwnode_handle *parent) -{ - struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; - acpi_status status; - - status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf, - ACPI_TYPE_PACKAGE); - if (ACPI_FAILURE(status)) - return false; - - if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, - parent)) - return true; - - ACPI_FREE(buf.pointer); - return false; -} - static bool acpi_nondev_subnode_ok(acpi_handle scope, const union acpi_object *link, struct list_head *list, struct fwnode_handle *parent) { + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; acpi_handle handle; acpi_status status;
+ /* + * If the scope is unknown, the _DSD-equivalent package being parsed + * was embedded in an outer _DSD-equivalent package as a result of + * direct evaluation of an object pointed to by a reference. In that + * case, using a pathname as the target object pointer is invalid. + */ if (!scope) return false;
@@ -157,7 +150,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope, if (ACPI_FAILURE(status)) return false;
- return acpi_nondev_subnode_data_ok(handle, link, list, parent); + status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf, + ACPI_TYPE_PACKAGE); + if (ACPI_FAILURE(status)) + return false; + + if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, + parent)) + return true; + + ACPI_FREE(buf.pointer); + return false; }
static bool acpi_add_nondev_subnodes(acpi_handle scope, @@ -168,9 +171,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope, bool ret = false; int i;
+ /* + * Every element in the links package is expected to represent a link + * to a non-device node in a tree containing device-specific data. + */ for (i = 0; i < links->package.count; i++) { union acpi_object *link, *desc; - acpi_handle handle; bool result;
link = &links->package.elements[i]; @@ -178,26 +184,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope, if (link->package.count != 2) continue;
- /* The first one must be a string. */ + /* The first one (the key) must be a string. */ if (link->package.elements[0].type != ACPI_TYPE_STRING) continue;
- /* The second one may be a string, a reference or a package. */ + /* The second one (the target) may be a string or a package. */ switch (link->package.elements[1].type) { case ACPI_TYPE_STRING: + /* + * The string is expected to be a full pathname or a + * pathname segment relative to the given scope. That + * pathname is expected to point to an object returning + * a package that contains _DSD-equivalent information. + */ result = acpi_nondev_subnode_ok(scope, link, list, parent); break; - case ACPI_TYPE_LOCAL_REFERENCE: - handle = link->package.elements[1].reference.handle; - result = acpi_nondev_subnode_data_ok(handle, link, list, - parent); - break; case ACPI_TYPE_PACKAGE: + /* + * This happens when a reference is used in AML to + * point to the target. Since the target is expected + * to be a named object, a reference to it will cause it + * to be avaluated in place and its return package will + * be embedded in the links package at the location of + * the reference. + * + * The target package is expected to contain _DSD- + * equivalent information, but the scope in which it + * is located in the original AML is unknown. Thus + * it cannot contain pathname segments represented as + * strings because there is no way to build full + * pathnames out of them. + */ + acpi_handle_debug(scope, "subnode %s: Unknown scope\n", + link->package.elements[0].string.pointer); desc = &link->package.elements[1]; result = acpi_nondev_subnode_extract(desc, NULL, link, list, parent); break; + case ACPI_TYPE_LOCAL_REFERENCE: + /* + * It is not expected to see any local references in + * the links package because referencing a named object + * should cause it to be evaluated in place. + */ + acpi_handle_info(scope, "subnode %s: Unexpected reference\n", + link->package.elements[0].string.pointer); + fallthrough; default: result = false; break; @@ -357,6 +390,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data) struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) { + if (!dn->handle) + continue; + acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data); @@ -371,6 +407,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data) acpi_status status; bool ret;
+ if (!dn->handle) + continue; + status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn); if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) { acpi_handle_err(dn->handle, "Can't tag data node\n"); diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index 60c1df048fa2..7e76e8a0ccdc 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -164,7 +164,6 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) { struct mhi_event *mhi_event = mhi_cntrl->mhi_event; - struct device *dev = &mhi_cntrl->mhi_dev->dev; unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; int i, ret;
@@ -191,7 +190,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) continue;
if (mhi_event->irq >= mhi_cntrl->nr_irqs) { - dev_err(dev, "irq %d not available for event ring\n", + dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n", mhi_event->irq); ret = -EINVAL; goto error_request; @@ -202,7 +201,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) irq_flags, "mhi", mhi_event); if (ret) { - dev_err(dev, "Error requesting irq:%d for ev:%d\n", + dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n", mhi_cntrl->irq[mhi_event->irq], i); goto error_request; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 653e07171dc6..a475d0bd2685 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -39,7 +39,9 @@
#define IPMI_DRIVER_VERSION "39.2"
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user); static int ipmi_init_msghandler(void); static void smi_recv_tasklet(struct tasklet_struct *t); static void handle_new_recv_msgs(struct ipmi_smi *intf); @@ -939,13 +941,11 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) * risk. At this moment, simply skip it in that case. */ ipmi_free_recv_msg(msg); - atomic_dec(&msg->user->nr_msgs); } else { int index; struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
if (user) { - atomic_dec(&user->nr_msgs); user->handler->ipmi_recv_hndl(msg, user->handler_data); release_ipmi_user(user, index); } else { @@ -1634,8 +1634,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val) spin_unlock_irqrestore(&intf->events_lock, flags);
list_for_each_entry_safe(msg, msg2, &msgs, link) { - msg->user = user; - kref_get(&user->refcount); + ipmi_set_recv_msg_user(msg, user); deliver_local_response(intf, msg); }
@@ -2309,22 +2308,18 @@ static int i_ipmi_request(struct ipmi_user *user, struct ipmi_recv_msg *recv_msg; int rv = 0;
- if (user) { - if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { - /* Decrement will happen at the end of the routine. */ - rv = -EBUSY; - goto out; - } - } - - if (supplied_recv) + if (supplied_recv) { recv_msg = supplied_recv; - else { - recv_msg = ipmi_alloc_recv_msg(); - if (recv_msg == NULL) { - rv = -ENOMEM; - goto out; + recv_msg->user = user; + if (user) { + atomic_inc(&user->nr_msgs); + /* The put happens when the message is freed. */ + kref_get(&user->refcount); } + } else { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) + return PTR_ERR(recv_msg); } recv_msg->user_msg_data = user_msg_data;
@@ -2335,8 +2330,7 @@ static int i_ipmi_request(struct ipmi_user *user, if (smi_msg == NULL) { if (!supplied_recv) ipmi_free_recv_msg(recv_msg); - rv = -ENOMEM; - goto out; + return -ENOMEM; } }
@@ -2346,10 +2340,6 @@ static int i_ipmi_request(struct ipmi_user *user, goto out_err; }
- recv_msg->user = user; - if (user) - /* The put happens when the message is freed. */ - kref_get(&user->refcount); recv_msg->msgid = msgid; /* * Store the message to send in the receive message so timeout @@ -2378,8 +2368,10 @@ static int i_ipmi_request(struct ipmi_user *user,
if (rv) { out_err: - ipmi_free_smi_msg(smi_msg); - ipmi_free_recv_msg(recv_msg); + if (!supplied_smi) + ipmi_free_smi_msg(smi_msg); + if (!supplied_recv) + ipmi_free_recv_msg(recv_msg); } else { dev_dbg(intf->si_dev, "Send: %*ph\n", smi_msg->data_size, smi_msg->data); @@ -2388,9 +2380,6 @@ static int i_ipmi_request(struct ipmi_user *user, } rcu_read_unlock();
-out: - if (rv && user) - atomic_dec(&user->nr_msgs); return rv; }
@@ -3883,7 +3872,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_ipmb_addr *ipmb_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -3904,9 +3893,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock();
if (user == NULL) { @@ -3941,47 +3929,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, rv = -1; } rcu_read_unlock(); - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; - ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; - ipmb_addr->slave_addr = msg->rsp[6]; - ipmb_addr->lun = msg->rsp[7] & 3; - ipmb_addr->channel = msg->rsp[3] & 0xf; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; + ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb_addr->slave_addr = msg->rsp[6]; + ipmb_addr->lun = msg->rsp[7] & 3; + ipmb_addr->channel = msg->rsp[3] & 0xf;
- /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[7] >> 2; - recv_msg->msg.netfn = msg->rsp[4] >> 2; - recv_msg->msg.cmd = msg->rsp[8]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[7] >> 2; + recv_msg->msg.netfn = msg->rsp[4] >> 2; + recv_msg->msg.cmd = msg->rsp[8]; + recv_msg->msg.data = recv_msg->msg_data;
- /* - * We chop off 10, not 9 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 10; - memcpy(recv_msg->msg_data, &msg->rsp[9], - msg->rsp_size - 10); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 10, not 9 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 10; + memcpy(recv_msg->msg_data, &msg->rsp[9], + msg->rsp_size - 10); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; }
return rv; @@ -3994,7 +3976,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, int rv = 0; struct ipmi_user *user = NULL; struct ipmi_ipmb_direct_addr *daddr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; unsigned char netfn = msg->rsp[0] >> 2; unsigned char cmd = msg->rsp[3];
@@ -4003,9 +3985,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock();
if (user == NULL) { @@ -4032,44 +4013,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, rv = -1; } rcu_read_unlock(); - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; - daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; - daddr->channel = 0; - daddr->slave_addr = msg->rsp[1]; - daddr->rs_lun = msg->rsp[0] & 3; - daddr->rq_lun = msg->rsp[2] & 3; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; + daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; + daddr->channel = 0; + daddr->slave_addr = msg->rsp[1]; + daddr->rs_lun = msg->rsp[0] & 3; + daddr->rq_lun = msg->rsp[2] & 3;
- /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = (msg->rsp[2] >> 2); - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[3]; - recv_msg->msg.data = recv_msg->msg_data; - - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, msg->rsp + 4, - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = (msg->rsp[2] >> 2); + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[3]; + recv_msg->msg.data = recv_msg->msg_data; + + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, msg->rsp + 4, + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; }
return rv; @@ -4183,7 +4158,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_lan_addr *lan_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 12) { /* Message not big enough, just ignore it. */ @@ -4204,9 +4179,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock();
if (user == NULL) { @@ -4218,49 +4192,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, * them to be freed. */ rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* Extract the source address from the data. */ - lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; - lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; - lan_addr->session_handle = msg->rsp[4]; - lan_addr->remote_SWID = msg->rsp[8]; - lan_addr->local_SWID = msg->rsp[5]; - lan_addr->lun = msg->rsp[9] & 3; - lan_addr->channel = msg->rsp[3] & 0xf; - lan_addr->privilege = msg->rsp[3] >> 4; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; + lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; + lan_addr->session_handle = msg->rsp[4]; + lan_addr->remote_SWID = msg->rsp[8]; + lan_addr->local_SWID = msg->rsp[5]; + lan_addr->lun = msg->rsp[9] & 3; + lan_addr->channel = msg->rsp[3] & 0xf; + lan_addr->privilege = msg->rsp[3] >> 4;
- /* - * Extract the rest of the message information - * from the IPMB header. - */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[9] >> 2; - recv_msg->msg.netfn = msg->rsp[6] >> 2; - recv_msg->msg.cmd = msg->rsp[10]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[9] >> 2; + recv_msg->msg.netfn = msg->rsp[6] >> 2; + recv_msg->msg.cmd = msg->rsp[10]; + recv_msg->msg.data = recv_msg->msg_data;
- /* - * We chop off 12, not 11 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 12; - memcpy(recv_msg->msg_data, &msg->rsp[11], - msg->rsp_size - 12); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 12, not 11 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 12; + memcpy(recv_msg->msg_data, &msg->rsp[11], + msg->rsp_size - 12); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; }
return rv; @@ -4282,7 +4251,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_system_interface_addr *smi_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL;
/* * We expect the OEM SW to perform error checking @@ -4311,9 +4280,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock();
if (user == NULL) { @@ -4326,48 +4294,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, */
rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_user); - } else { - /* - * OEM Messages are expected to be delivered via - * the system interface to SMS software. We might - * need to visit this again depending on OEM - * requirements - */ - smi_addr = ((struct ipmi_system_interface_addr *) - &recv_msg->addr); - smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; - smi_addr->channel = IPMI_BMC_CHANNEL; - smi_addr->lun = msg->rsp[0] & 3; - - recv_msg->user = user; - recv_msg->user_msg_data = NULL; - recv_msg->recv_type = IPMI_OEM_RECV_TYPE; - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[1]; - recv_msg->msg.data = recv_msg->msg_data; + } else if (!IS_ERR(recv_msg)) { + /* + * OEM Messages are expected to be delivered via + * the system interface to SMS software. We might + * need to visit this again depending on OEM + * requirements + */ + smi_addr = ((struct ipmi_system_interface_addr *) + &recv_msg->addr); + smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr->channel = IPMI_BMC_CHANNEL; + smi_addr->lun = msg->rsp[0] & 3; + + recv_msg->user_msg_data = NULL; + recv_msg->recv_type = IPMI_OEM_RECV_TYPE; + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[1]; + recv_msg->msg.data = recv_msg->msg_data;
- /* - * The message starts at byte 4 which follows the - * Channel Byte in the "GET MESSAGE" command - */ - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, &msg->rsp[4], - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * The message starts at byte 4 which follows the + * Channel Byte in the "GET MESSAGE" command + */ + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, &msg->rsp[4], + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; }
return rv; @@ -4426,8 +4388,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, if (!user->gets_events) continue;
- recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) { rcu_read_unlock(); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { @@ -4446,8 +4408,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, deliver_count++;
copy_event_into_recv_msg(recv_msg, msg); - recv_msg->user = user; - kref_get(&user->refcount); list_add_tail(&recv_msg->link, &msgs); } srcu_read_unlock(&intf->users_srcu, index); @@ -4463,8 +4423,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, * No one to receive the message, put it in queue if there's * not already too many things in the queue. */ - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(NULL); + if (IS_ERR(recv_msg)) { /* * We couldn't allocate memory for the * message, so requeue it for handling @@ -5156,27 +5116,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg) kfree(msg); }
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user) { struct ipmi_recv_msg *rv;
+ if (user) { + if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { + atomic_dec(&user->nr_msgs); + return ERR_PTR(-EBUSY); + } + } + rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); - if (rv) { - rv->user = NULL; - rv->done = free_recv_msg; - atomic_inc(&recv_msg_inuse_count); + if (!rv) { + if (user) + atomic_dec(&user->nr_msgs); + return ERR_PTR(-ENOMEM); } + + rv->user = user; + rv->done = free_recv_msg; + if (user) + kref_get(&user->refcount); + atomic_inc(&recv_msg_inuse_count); return rv; }
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { - if (msg->user && !oops_in_progress) + if (msg->user && !oops_in_progress) { + atomic_dec(&msg->user->nr_msgs); kref_put(&msg->user->refcount, free_user); + } msg->done(msg); } EXPORT_SYMBOL(ipmi_free_recv_msg);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user) +{ + WARN_ON_ONCE(msg->user); /* User should not be set. */ + msg->user = user; + atomic_inc(&user->nr_msgs); + kref_get(&user->refcount); +} + static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index caf9ba2dc0b1..968b891bcb03 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -878,8 +878,8 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, * will call disable_irq which undoes all of the above. */ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { - tpm_tis_write8(priv, original_int_vec, - TPM_INT_VECTOR(priv->locality)); + tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), + original_int_vec); rc = -1; }
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index 5104d4025484..a5e8f5949d77 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -275,8 +275,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw, long best_diff = LONG_MIN; u32 shift;
-	if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
-		return parent_rate;
+	if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+		req->rate = parent_rate;
+
+		return 0;
+	}
/* Fist step: check the available dividers. */ for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c index fcd410461d3b..516b5830f16e 100644 --- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c +++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c @@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = { GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28), GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29), /* INFRA_AO1 */ - GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0), + GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0), GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1), GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2), GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4), diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c index c8593554239d..93fa67294e5a 100644 --- a/drivers/clk/mediatek/clk-mux.c +++ b/drivers/clk/mediatek/clk-mux.c @@ -132,9 +132,7 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index) static int mtk_clk_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { - struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); - - return clk_mux_determine_rate_flags(hw, req, mux->data->flags); + return clk_mux_determine_rate_flags(hw, req, 0); }
const struct clk_ops mtk_mux_clr_set_upd_ops = { diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c index 69ebf65081b8..bbd7d64038fa 100644 --- a/drivers/clk/nxp/clk-lpc18xx-cgu.c +++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c @@ -371,23 +371,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw, return 0; }
-static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int lpc18xx_pll0_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { unsigned long m;
-	if (*prate < rate) {
+	if (req->best_parent_rate < req->rate) {
 		pr_warn("%s: pll dividers not supported\n", __func__);
 		return -EINVAL;
 	}
-	m = DIV_ROUND_UP_ULL(*prate, rate * 2);
-	if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
-		pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+	m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2);
+	if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+		pr_warn("%s: unable to support rate %lu\n", __func__, req->rate);
 		return -EINVAL;
 	}
-	return 2 * *prate * m;
+	req->rate = 2 * req->best_parent_rate * m;
+
+	return 0;
 }
static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, @@ -403,7 +405,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, }
 	m = DIV_ROUND_UP_ULL(parent_rate, rate * 2);
-	if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
+	if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
 		pr_warn("%s: unable to support rate %lu\n", __func__, rate);
 		return -EINVAL;
 	}
@@ -444,7 +446,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc18xx_pll0_ops = { .recalc_rate = lpc18xx_pll0_recalc_rate, - .round_rate = lpc18xx_pll0_round_rate, + .determine_rate = lpc18xx_pll0_determine_rate, .set_rate = lpc18xx_pll0_set_rate, };
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index 39241662a412..3bc56a3c6b24 100644 --- a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -603,7 +603,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp,
bpmp->num_clocks = count;
-	bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL);
+	bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL);
 	if (!bpmp->clocks)
 		return -ENOMEM;
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c index e95fdc49c226..bbceb0289d45 100644 --- a/drivers/clocksource/clps711x-timer.c +++ b/drivers/clocksource/clps711x-timer.c @@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np) unsigned int irq = irq_of_parse_and_map(np, 0); struct clk *clock = of_clk_get(np, 0); void __iomem *base = of_iomap(np, 0); + int ret = 0;
if (!base) return -ENOMEM; - if (!irq) - return -EINVAL; - if (IS_ERR(clock)) - return PTR_ERR(clock); + if (!irq) { + ret = -EINVAL; + goto unmap_io; + } + if (IS_ERR(clock)) { + ret = PTR_ERR(clock); + goto unmap_io; + }
switch (of_alias_get_id(np, "timer")) { case CLPS711X_CLKSRC_CLOCKSOURCE: clps711x_clksrc_init(clock, base); break; case CLPS711X_CLKSRC_CLOCKEVENT: - return _clps711x_clkevt_init(clock, base, irq); + ret = _clps711x_clkevt_init(clock, base, irq); + break; default: - return -EINVAL; + ret = -EINVAL; + break; }
- return 0; +unmap_io: + iounmap(base); + return ret; } TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3ded51db833d..5af471a7b752 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1308,10 +1308,10 @@ static void update_qos_request(enum freq_qos_req_type type) continue;
 		req = policy->driver_data;
-		cpufreq_cpu_put(policy);
-
-		if (!req)
+		if (!req) {
+			cpufreq_cpu_put(policy);
 			continue;
+		}
if (hwp_active) intel_pstate_get_hwp_cap(cpu); @@ -1327,6 +1327,8 @@ static void update_qos_request(enum freq_qos_req_type type)
 		if (freq_qos_update_request(req, freq) < 0)
 			pr_warn("Failed to update freq constraint: CPU%d\n", i);
+
+		cpufreq_cpu_put(policy);
 	}
 }
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 6c88827f4e62..ee57676bbee1 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -86,10 +86,14 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy, { struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); struct cpufreq_frequency_table *tbl = policy->freq_table + index; - unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset; + unsigned int edvd_offset; u32 edvd_val = tbl->driver_data; + u32 cpu;
-	writel(edvd_val, data->regs + edvd_offset);
+	for_each_cpu(cpu, policy->cpus) {
+		edvd_offset = data->cpus[cpu].edvd_offset;
+		writel(edvd_val, data->regs + edvd_offset);
+	}
return 0; } diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 4edac724983a..0b3c917d505d 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -158,6 +158,14 @@ static inline int performance_multiplier(unsigned int nr_iowaiters)
static DEFINE_PER_CPU(struct menu_device, menu_devices);
+static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
+{
+	/* Update the repeating-pattern data. */
+	data->intervals[data->interval_ptr++] = interval_us;
+	if (data->interval_ptr >= INTERVALS)
+		data->interval_ptr = 0;
+}
+
 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/* @@ -288,6 +296,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (data->needs_update) { menu_update(drv, dev); data->needs_update = 0; + } else if (!dev->last_residency_ns) { + /* + * This happens when the driver rejects the previously selected + * idle state and returns an error, so update the recent + * intervals table to prevent invalid information from being + * used going forward. + */ + menu_update_intervals(data, UINT_MAX); }
/* determine the expected residency time, round up */ @@ -542,10 +558,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
-	/* update the repeating-pattern data */
-	data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
-	if (data->interval_ptr >= INTERVALS)
-		data->interval_ptr = 0;
+	menu_update_intervals(data, ktime_to_us(measured_ns));
 }
/** diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c index ef73b0028b4d..47dcf0bf4fb1 100644 --- a/drivers/crypto/aspeed/aspeed-hace-crypto.c +++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c @@ -335,7 +335,7 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
 	} else {
 		dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
-			     DMA_TO_DEVICE);
+			     DMA_FROM_DEVICE);
 		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
 			     DMA_TO_DEVICE);
 	}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 8b7bc1076e0d..803966256aaa 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -548,7 +548,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 	if (err && (dd->flags & TDES_FLAGS_FAST)) {
 		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
 	}
return err; diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index bf19dd66c213..6a0a5b16663e 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -225,11 +225,16 @@ EXPORT_SYMBOL(meson_sm_call_write); struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node) { struct platform_device *pdev = of_find_device_by_node(sm_node); + struct meson_sm_firmware *fw;
if (!pdev) return NULL;
-	return platform_get_drvdata(pdev);
+	fw = platform_get_drvdata(pdev);
+
+	put_device(&pdev->dev);
+
+	return fw;
 }
 EXPORT_SYMBOL_GPL(meson_sm_get);
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c index c00968ce7a56..26d70ac90933 100644 --- a/drivers/gpio/gpio-wcd934x.c +++ b/drivers/gpio/gpio-wcd934x.c @@ -101,8 +101,7 @@ static int wcd_gpio_probe(struct platform_device *pdev) chip->base = -1; chip->ngpio = WCD934X_NPINS; chip->label = dev_name(dev); - chip->of_gpio_n_cells = 2; - chip->can_sleep = false; + chip->can_sleep = true;
return devm_gpiochip_add_data(dev, chip, data); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 670d5ab9d998..f97c18267708 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration( REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
if (data->taps.h_taps + data->taps.v_taps <= 2) { - /* Set bypass */ - - /* DCE6 has no SCL_MODE register, skip scale mode programming */ + /* Disable scaler functionality */ + REG_WRITE(SCL_SCALER_ENABLE, 0);
+		/* Clear registers that can cause glitches even when the scaler is off */
+		REG_WRITE(SCL_TAP_CONTROL, 0);
+		REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+		REG_WRITE(SCL_F_SHARP_CONTROL, 0);
 		return false;
 	}
@@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration( SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1, SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
-	/* DCE6 has no SCL_MODE register, skip scale mode programming */
+	REG_WRITE(SCL_SCALER_ENABLE, 1);
/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
@@ -502,6 +505,8 @@ static void dce60_transform_set_scaler( REG_SET(DC_LB_MEM_SIZE, 0, DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+	REG_WRITE(SCL_UPDATE, 0x00010000);
+
 	/* Clear SCL_F_SHARP_CONTROL value to 0 */
 	REG_WRITE(SCL_F_SHARP_CONTROL, 0);
@@ -527,8 +532,7 @@ static void dce60_transform_set_scaler( if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) { /* 4. Program vertical filters */ if (xfm_dce->filter_v == NULL) - REG_SET(SCL_VERT_FILTER_CONTROL, 0, - SCL_V_2TAP_HARDCODE_COEF_EN, 0); + REG_WRITE(SCL_VERT_FILTER_CONTROL, 0); program_multi_taps_filter( xfm_dce, data->taps.v_taps, @@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(
/* 5. Program horizontal filters */ if (xfm_dce->filter_h == NULL) - REG_SET(SCL_HORZ_FILTER_CONTROL, 0, - SCL_H_2TAP_HARDCODE_COEF_EN, 0); + REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0); program_multi_taps_filter( xfm_dce, data->taps.h_taps, @@ -566,6 +569,8 @@ static void dce60_transform_set_scaler( /* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
 	/* DCE6 DATA_FORMAT register does not support ALPHA_EN */
+
+	REG_WRITE(SCL_UPDATE, 0);
 }
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h index cbce194ec7b8..eb716e8337e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h @@ -155,6 +155,9 @@ SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \ SRI(VIEWPORT_START, SCL, id), \ SRI(VIEWPORT_SIZE, SCL, id), \ + SRI(SCL_SCALER_ENABLE, SCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \ SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \ SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \ SRI(SCL_VERT_FILTER_INIT, SCL, id), \ @@ -590,6 +593,7 @@ struct dce_transform_registers { uint32_t SCL_VERT_FILTER_SCALE_RATIO; uint32_t SCL_HORZ_FILTER_INIT; #if defined(CONFIG_DRM_AMD_DC_SI) + uint32_t SCL_SCALER_ENABLE; uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA; uint32_t SCL_HORZ_FILTER_INIT_CHROMA; #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h index 9de01ae574c0..067eddd9c62d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h @@ -4115,6 +4115,7 @@ #define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55 #define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40 #define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL0_SCL_SCALER_ENABLE 0x1B42 #define mmSCL0_SCL_CONTROL 0x1B44 #define mmSCL0_SCL_DEBUG 0x1B6A #define mmSCL0_SCL_DEBUG2 0x1B69 @@ -4144,6 +4145,7 @@ #define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55 #define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40 #define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41 +#define mmSCL1_SCL_SCALER_ENABLE 0x1E42 #define mmSCL1_SCL_CONTROL 0x1E44 #define mmSCL1_SCL_DEBUG 0x1E6A #define mmSCL1_SCL_DEBUG2 0x1E69 @@ -4173,6 +4175,7 @@ #define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155 #define mmSCL2_SCL_COEF_RAM_SELECT 0x4140 #define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141 +#define mmSCL2_SCL_SCALER_ENABLE 0x4142 #define mmSCL2_SCL_CONTROL 0x4144 #define mmSCL2_SCL_DEBUG 0x416A #define mmSCL2_SCL_DEBUG2 0x4169 @@ -4202,6 +4205,7 @@ #define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455 #define mmSCL3_SCL_COEF_RAM_SELECT 0x4440 #define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441 +#define mmSCL3_SCL_SCALER_ENABLE 0x4442 #define mmSCL3_SCL_CONTROL 0x4444 #define mmSCL3_SCL_DEBUG 0x446A #define mmSCL3_SCL_DEBUG2 0x4469 @@ -4231,6 +4235,7 @@ #define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755 #define mmSCL4_SCL_COEF_RAM_SELECT 0x4740 #define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741 +#define mmSCL4_SCL_SCALER_ENABLE 0x4742 #define mmSCL4_SCL_CONTROL 0x4744 #define mmSCL4_SCL_DEBUG 0x476A #define mmSCL4_SCL_DEBUG2 0x4769 @@ -4260,6 +4265,7 @@ #define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55 #define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40 #define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41 +#define mmSCL5_SCL_SCALER_ENABLE 0x4A42 #define mmSCL5_SCL_CONTROL 0x4A44 #define mmSCL5_SCL_DEBUG 0x4A6A #define mmSCL5_SCL_DEBUG2 0x4A69 @@ -4287,6 +4293,7 @@ #define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55 #define mmSCL_COEF_RAM_SELECT 0x1B40 #define mmSCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL_SCALER_ENABLE 0x1B42 #define mmSCL_CONTROL 0x1B44 #define mmSCL_DEBUG 0x1B6A #define mmSCL_DEBUG2 0x1B69 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h index bd8085ec54ed..da5596fbfdcb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h +++ 
b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h @@ -8648,6 +8648,8 @@ #define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000 #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000 +#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L +#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000 #define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L #define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000 #define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2953d6a6d390..ab571b76b129 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -791,7 +791,7 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, nvif_vmm_put(vmm, &old_mem->vma[1]); nvif_vmm_put(vmm, &old_mem->vma[0]); } - return 0; + return ret; }
static int diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index f5c4a40fb16d..27f33297fcf3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -339,8 +339,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx, hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key); } node->res = vmw_resource_reference_unless_doomed(res); - if (!node->res) + if (!node->res) { + hash_del_rcu(&node->hash.head); return -ESRCH; + }
node->first_usage = 1; if (!res->dev_priv->has_mob) { @@ -690,7 +692,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx) hash_del_rcu(&val->hash.head);
 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
-		hash_del_rcu(&entry->hash.head);
+		hash_del_rcu(&val->hash.head);
ctx->sw_context = NULL; } diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c index 3db021b96ae9..e1a1f50b4885 100644 --- a/drivers/iio/adc/xilinx-ams.c +++ b/drivers/iio/adc/xilinx-ams.c @@ -118,7 +118,7 @@ #define AMS_ALARM_THRESHOLD_OFF_10 0x10 #define AMS_ALARM_THRESHOLD_OFF_20 0x20
-#define AMS_ALARM_THR_DIRECT_MASK	BIT(1)
+#define AMS_ALARM_THR_DIRECT_MASK	BIT(0)
 #define AMS_ALARM_THR_MIN	0x0000
 #define AMS_ALARM_THR_MAX	(BIT(16) - 1)
@@ -385,6 +385,29 @@ static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask) ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg); }
+static void ams_unmask(struct ams *ams) +{ + unsigned int status, unmask; + + status = readl(ams->base + AMS_ISR_0); + + /* Clear those bits which are not active anymore */ + unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm; + + /* Clear status of disabled alarm */ + unmask |= ams->intr_mask; + + ams->current_masked_alarm &= status; + + /* Also clear those which are masked out anyway */ + ams->current_masked_alarm &= ~ams->intr_mask; + + /* Clear the interrupts before we unmask them */ + writel(unmask, ams->base + AMS_ISR_0); + + ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK); +} + static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask) { unsigned long flags; @@ -397,6 +420,7 @@ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
spin_lock_irqsave(&ams->intr_lock, flags); ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask); + ams_unmask(ams); spin_unlock_irqrestore(&ams->intr_lock, flags); }
@@ -1025,28 +1049,9 @@ static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events) static void ams_unmask_worker(struct work_struct *work) { struct ams *ams = container_of(work, struct ams, ams_unmask_work.work); - unsigned int status, unmask;
spin_lock_irq(&ams->intr_lock); - - status = readl(ams->base + AMS_ISR_0); - - /* Clear those bits which are not active anymore */ - unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm; - - /* Clear status of disabled alarm */ - unmask |= ams->intr_mask; - - ams->current_masked_alarm &= status; - - /* Also clear those which are masked out anyway */ - ams->current_masked_alarm &= ~ams->intr_mask; - - /* Clear the interrupts before we unmask them */ - writel(unmask, ams->base + AMS_ISR_0); - - ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK); - + ams_unmask(ams); spin_unlock_irq(&ams->intr_lock);
/* If still pending some alarm re-trigger the timer */ diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c index e0b7f658d611..cf9cf90cd6e2 100644 --- a/drivers/iio/dac/ad5360.c +++ b/drivers/iio/dac/ad5360.c @@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set, unsigned int clr) { struct ad5360_state *st = iio_priv(indio_dev); - unsigned int ret; + int ret;
mutex_lock(&st->lock);
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c index 7644acfd879e..9228e3cee1b8 100644 --- a/drivers/iio/dac/ad5421.c +++ b/drivers/iio/dac/ad5421.c @@ -186,7 +186,7 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set, unsigned int clr) { struct ad5421_state *st = iio_priv(indio_dev); - unsigned int ret; + int ret;
mutex_lock(&st->lock);
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c index 4abf80f75ef5..03e75261e891 100644 --- a/drivers/iio/frequency/adf4350.c +++ b/drivers/iio/frequency/adf4350.c @@ -143,6 +143,19 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq) return -EINVAL;
+ st->r4_rf_div_sel = 0; + + /* + * !\TODO: The below computation is making sure we get a power of 2 + * shift (st->r4_rf_div_sel) so that freq becomes higher or equal to + * ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long() + * and friends. + */ + while (freq < ADF4350_MIN_VCO_FREQ) { + freq <<= 1; + st->r4_rf_div_sel++; + } + if (freq > ADF4350_MAX_FREQ_45_PRESC) { prescaler = ADF4350_REG1_PRESCALER; mdiv = 75; @@ -151,13 +164,6 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) mdiv = 23; }
- st->r4_rf_div_sel = 0; - - while (freq < ADF4350_MIN_VCO_FREQ) { - freq <<= 1; - st->r4_rf_div_sel++; - } - /* * Allow a predefined reference division factor * if not set, compute our own diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index 9dec4ad38c0d..f955c3d01fef 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -730,10 +730,6 @@ static int __maybe_unused inv_icm42600_resume(struct device *dev) if (ret) goto out_unlock;
- pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - /* restore sensors state */ ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro, st->suspended.accel, diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 5418f3d6eacf..2e7759bdb1df 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4563,7 +4563,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) }
if (info->ats_supported && ecap_prs(iommu->ecap) && - pci_pri_supported(pdev)) + ecap_pds(iommu->ecap) && pci_pri_supported(pdev)) info->pri_supported = 1; } } diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index d097f45b0e5f..6a48b832950d 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -618,11 +618,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata) i = pdata->num_mboxes; for (; i >= 0; i--) { ipi_mbox = &pdata->ipi_mboxes[i]; - if (ipi_mbox->dev.parent) { - mbox_controller_unregister(&ipi_mbox->mbox); - if (device_is_registered(&ipi_mbox->dev)) - device_unregister(&ipi_mbox->dev); - } + if (device_is_registered(&ipi_mbox->dev)) + device_unregister(&ipi_mbox->dev); } }
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index 46d91cd0870c..da36f9cec0b8 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c @@ -534,8 +534,8 @@ static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111, static int mt9v111_hw_config(struct mt9v111_dev *mt9v111) { struct i2c_client *c = mt9v111->client; - unsigned int ret; u16 outfmtctrl2; + int ret;
/* Force device reset. */ ret = __mt9v111_hw_reset(mt9v111); diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c index 94abd042045d..6fd8885f5928 100644 --- a/drivers/media/mc/mc-devnode.c +++ b/drivers/media/mc/mc-devnode.c @@ -50,11 +50,6 @@ static void media_devnode_release(struct device *cd) { struct media_devnode *devnode = to_media_devnode(cd);
- mutex_lock(&media_devnode_lock); - /* Mark device node number as free */ - clear_bit(devnode->minor, media_devnode_nums); - mutex_unlock(&media_devnode_lock); - /* Release media_devnode and perform other cleanups as needed. */ if (devnode->release) devnode->release(devnode); @@ -283,6 +278,7 @@ void media_devnode_unregister(struct media_devnode *devnode) /* Delete the cdev on this minor as well */ cdev_device_del(&devnode->cdev, &devnode->dev); devnode->media_dev = NULL; + clear_bit(devnode->minor, media_devnode_nums); mutex_unlock(&media_devnode_lock);
put_device(&devnode->dev); diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index bdb8f512be57..9b707183c96c 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -665,7 +665,7 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, * (already discovered through iterating over links) and pads * not internally connected. */ - if (origin == local || !local->num_links || + if (origin == local || local->num_links || !media_entity_has_pad_interdep(origin->entity, origin->index, local->index)) continue; diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c index 013694bfcb1c..7cbb2d586932 100644 --- a/drivers/media/pci/cx18/cx18-queue.c +++ b/drivers/media/pci/cx18/cx18-queue.c @@ -379,15 +379,22 @@ int cx18_stream_alloc(struct cx18_stream *s) break; }
+ buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev, + buf->buf, s->buf_size, + s->dma); + if (dma_mapping_error(&s->cx->pci_dev->dev, buf->dma_handle)) { + kfree(buf->buf); + kfree(mdl); + kfree(buf); + break; + } + INIT_LIST_HEAD(&mdl->list); INIT_LIST_HEAD(&mdl->buf_list); mdl->id = s->mdl_base_idx; /* a somewhat safe value */ cx18_enqueue(s, mdl, &s->q_idle);
INIT_LIST_HEAD(&buf->list); - buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev, - buf->buf, s->buf_size, - s->dma); cx18_buf_sync_for_cpu(s, buf); list_add_tail(&buf->list, &s->buf_pool); } diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c index b7aaa8b4a784..e39bf64c5c71 100644 --- a/drivers/media/pci/ivtv/ivtv-irq.c +++ b/drivers/media/pci/ivtv/ivtv-irq.c @@ -351,7 +351,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
/* Insert buffer block for YUV if needed */ if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) { - if (yi->blanking_dmaptr) { + if (yi->blanking_ptr) { s->sg_pending[idx].src = yi->blanking_dmaptr; s->sg_pending[idx].dst = offset; s->sg_pending[idx].size = 720 * 16; diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index bd0b80331602..d5e3f1fe1029 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -126,7 +126,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
/* If we've offset the y plane, ensure top area is blanked */ - if (f->offset_y && yi->blanking_dmaptr) { + if (f->offset_y && yi->blanking_ptr) { dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16); dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr); dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]); @@ -930,6 +930,12 @@ static void ivtv_yuv_init(struct ivtv *itv) yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev, yi->blanking_ptr, 720 * 16, DMA_TO_DEVICE); + if (dma_mapping_error(&itv->pdev->dev, yi->blanking_dmaptr)) { + kfree(yi->blanking_ptr); + yi->blanking_ptr = NULL; + yi->blanking_dmaptr = 0; + IVTV_DEBUG_WARN("Failed to dma_map yuv blanking buffer\n"); + } } else { yi->blanking_dmaptr = 0; IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n"); diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index d9a9017b96ea..45f60a5f61b9 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -735,11 +735,11 @@ int lirc_register(struct rc_dev *dev)
cdev_init(&dev->lirc_cdev, &lirc_fops);
+	get_device(&dev->dev);
+
 	err = cdev_device_add(&dev->lirc_cdev, &dev->lirc_dev);
 	if (err)
-		goto out_ida;
-
-	get_device(&dev->dev);
+		goto out_put_device;
switch (dev->driver_type) { case RC_DRIVER_SCANCODE: @@ -763,7 +763,8 @@ int lirc_register(struct rc_dev *dev)
return 0;
-out_ida:
+out_put_device:
+	put_device(&dev->lirc_dev);
 	ida_free(&lirc_ida, minor);
 	return err;
 }
srom->dev = dev; - srom->reg_base = of_iomap(np, 0); - if (!srom->reg_base) { + srom->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(srom->reg_base)) { dev_err(&pdev->dev, "iomap of exynos srom controller failed\n"); - return -ENOMEM; + return PTR_ERR(srom->reg_base); }
platform_set_drvdata(pdev, srom);
srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets, ARRAY_SIZE(exynos_srom_offsets)); - if (!srom->reg_offset) { - iounmap(srom->reg_base); + if (!srom->reg_offset) return -ENOMEM; - }
for_each_child_of_node(np, child) { if (exynos_srom_configure_bank(srom, child)) { diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c index 282b8fd08009..f0f0af677c9d 100644 --- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c +++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c @@ -81,8 +81,9 @@ static struct mfd_cell chtdc_ti_dev[] = { static const struct regmap_config chtdc_ti_regmap_config = { .reg_bits = 8, .val_bits = 8, - .max_register = 128, - .cache_type = REGCACHE_NONE, + .max_register = 0xff, + /* The hardware does not support reading multiple registers at once */ + .use_single_read = true, };
static const struct regmap_irq chtdc_ti_irqs[] = { diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index cb87e8273779..2c58df6855f2 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -945,7 +945,11 @@ static void mmc_sdio_remove(struct mmc_host *host) */ static int mmc_sdio_alive(struct mmc_host *host) { - return mmc_select_card(host->card); + if (!mmc_host_is_spi(host)) + return mmc_select_card(host->card); + else + return mmc_io_rw_direct(host->card, 0, 0, SDIO_CCCR_CCCR, 0, + NULL); }
/* diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 5ea362ec3955..0b013a7b9035 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -876,10 +876,14 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev, if (!of_property_read_u32(np, "bank-width", &val)) { if (val == 2) { nand->options |= NAND_BUSWIDTH_16; - } else if (val != 1) { + } else if (val == 1) { + nand->options |= NAND_BUSWIDTH_AUTO; + } else { dev_err(&pdev->dev, "invalid bank-width %u\n", val); return -EINVAL; } + } else { + nand->options |= NAND_BUSWIDTH_AUTO; }
if (of_get_property(np, "nand-skip-bbtscan", NULL)) diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 9d58d8334467..ea49b0df397e 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -482,10 +482,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) "missing 'reg' property in node %pOF\n", tbi); err = -EBUSY; + of_node_put(tbi); goto error; } set_tbipa(*prop, pdev, data->get_tbipa, priv->map, &res); + of_node_put(tbi); } }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index ca4b93a01034..e66f77af677c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1177,9 +1177,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, mlx4_unregister_mac(mdev->dev, priv->port, mac);
hlist_del_rcu(&entry->hlist); - kfree_rcu(entry, rcu); en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", entry->mac, priv->port); + kfree_rcu(entry, rcu); ++removed; } } diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 0910d06ed296..9b5349230ad3 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1596,14 +1596,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) mutex_unlock(&ab->core_lock);
 	ath11k_dp_free(ab);
-	ath11k_hal_srng_deinit(ab);
+	ath11k_hal_srng_clear(ab);
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
-	ret = ath11k_hal_srng_init(ab);
-	if (ret)
-		return ret;
-
 	clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 692708f52b7d..11713ab60ecf 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -1351,6 +1351,22 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab) } EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+void ath11k_hal_srng_clear(struct ath11k_base *ab) +{ + /* No need to memset rdp and wrp memory since each individual + * segment would get cleared in ath11k_hal_srng_src_hw_init() + * and ath11k_hal_srng_dst_hw_init(). + */ + memset(ab->hal.srng_list, 0, + sizeof(ab->hal.srng_list)); + memset(ab->hal.shadow_reg_addr, 0, + sizeof(ab->hal.shadow_reg_addr)); + ab->hal.avail_blk_resource = 0; + ab->hal.current_blk_index = 0; + ab->hal.num_shadow_reg_configured = 0; +} +EXPORT_SYMBOL(ath11k_hal_srng_clear); + void ath11k_hal_dump_srng_stats(struct ath11k_base *ab) { struct hal_srng *srng; diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 6a1f78ee6eb6..84b070b47958 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -957,6 +957,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, struct hal_srng_params *params); int ath11k_hal_srng_init(struct ath11k_base *ath11k); void ath11k_hal_srng_deinit(struct ath11k_base *ath11k); +void ath11k_hal_srng_clear(struct ath11k_base *ab); void ath11k_hal_dump_srng_stats(struct ath11k_base *ab); void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab, u32 **cfg, u32 *len); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 218c1d69090e..d16295219430 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3109,10 +3109,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND * because of high power consumption (> 2 Watt) in s2idle * sleep. Only some boards with Intel CPU are affected. + * (Note for testing: Samsung 990 Evo Plus has same PCI ID) */ if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") || dmi_match(DMI_BOARD_NAME, "GMxPXxx") || dmi_match(DMI_BOARD_NAME, "GXxMRXx") || + dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") || dmi_match(DMI_BOARD_NAME, "PH4PG31") || dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") || dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71")) diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index a4f25a98a531..c868d36f1177 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -1214,8 +1214,8 @@ static int ks_pcie_probe(struct platform_device *pdev) if (irq < 0) return irq;
- ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, - "ks-pcie-error-irq", ks_pcie); + ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED, + "ks-pcie-error-irq", ks_pcie); if (ret < 0) { dev_err(dev, "failed to request error IRQ %d\n", irq); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 5100d2a53b8a..55bbc75c660b 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -1204,6 +1204,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; + int err;
/* * Controller-5 doesn't need to have its state set by BPMP-FW in @@ -1226,7 +1227,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, msg.rx.data = &resp; msg.rx.size = sizeof(resp);
-	return tegra_bpmp_transfer(pcie->bpmp, &msg);
+	err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+	if (err)
+		return err;
+	if (msg.rx.ret)
+		return -EINVAL;
+
+	return 0;
 }
static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, @@ -1235,6 +1242,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; + int err;
memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); @@ -1254,7 +1262,13 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, msg.rx.data = &resp; msg.rx.size = sizeof(resp);
-	return tegra_bpmp_transfer(pcie->bpmp, &msg);
+	err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+	if (err)
+		return err;
+	if (msg.rx.ret)
+		return -EINVAL;
+
+	return 0;
 }
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) @@ -1949,10 +1963,10 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
 static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
 {
-	if (unlikely(irq > 31))
+	if (unlikely(irq > 32))
 		return -EINVAL;
-	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+	appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
return 0; } diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index c165f6945459..3105d72facea 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -14,6 +14,7 @@ */
#include <linux/clk.h> +#include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/export.h> @@ -269,7 +270,7 @@ struct tegra_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct mutex map_lock; - spinlock_t mask_lock; + raw_spinlock_t mask_lock; void *virt; dma_addr_t phys; int irq; @@ -1607,14 +1608,13 @@ static void tegra_msi_irq_mask(struct irq_data *d) struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; - unsigned long flags; u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags); - value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); - value &= ~BIT(d->hwirq % 32); - afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); + value &= ~BIT(d->hwirq % 32); + afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); + } }
static void tegra_msi_irq_unmask(struct irq_data *d) @@ -1622,14 +1622,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d) struct tegra_msi *msi = irq_data_get_irq_chip_data(d); struct tegra_pcie *pcie = msi_to_pcie(msi); unsigned int index = d->hwirq / 32; - unsigned long flags; u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags); - value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); - value |= BIT(d->hwirq % 32); - afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); + value |= BIT(d->hwirq % 32); + afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); + } }
static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) @@ -1745,7 +1744,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) int err;
 	mutex_init(&msi->map_lock);
-	spin_lock_init(&msi->mask_lock);
+	raw_spin_lock_init(&msi->mask_lock);
if (IS_ENABLED(CONFIG_PCI_MSI)) { err = tegra_allocate_domains(msi); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index d0fe5076d977..ebfb56702c1a 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -12,6 +12,7 @@ */
#include <linux/bitops.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> @@ -36,7 +37,7 @@ struct rcar_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct mutex map_lock; - spinlock_t mask_lock; + raw_spinlock_t mask_lock; int irq1; int irq2; }; @@ -65,20 +66,13 @@ struct rcar_pcie_host { int (*phy_init_fn)(struct rcar_pcie_host *host); };
-static DEFINE_SPINLOCK(pmsr_lock); - static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) { - unsigned long flags; u32 pmsr, val; int ret = 0;
- spin_lock_irqsave(&pmsr_lock, flags); - - if (!pcie_base || pm_runtime_suspended(pcie_dev)) { - ret = -EINVAL; - goto unlock_exit; - } + if (!pcie_base || pm_runtime_suspended(pcie_dev)) + return -EINVAL;
pmsr = readl(pcie_base + PMSR);
@@ -100,8 +94,6 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) writel(L1FAEG | PMEL1RX, pcie_base + PMSR); }
-unlock_exit: - spin_unlock_irqrestore(&pmsr_lock, flags); return ret; }
@@ -648,28 +640,26 @@ static void rcar_msi_irq_mask(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - unsigned long flags; u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags); - value = rcar_pci_read_reg(pcie, PCIEMSIIER); - value &= ~BIT(d->hwirq); - rcar_pci_write_reg(pcie, value, PCIEMSIIER); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = rcar_pci_read_reg(pcie, PCIEMSIIER); + value &= ~BIT(d->hwirq); + rcar_pci_write_reg(pcie, value, PCIEMSIIER); + } }
static void rcar_msi_irq_unmask(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - unsigned long flags; u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags); - value = rcar_pci_read_reg(pcie, PCIEMSIIER); - value |= BIT(d->hwirq); - rcar_pci_write_reg(pcie, value, PCIEMSIIER); - spin_unlock_irqrestore(&msi->mask_lock, flags); + scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) { + value = rcar_pci_read_reg(pcie, PCIEMSIIER); + value |= BIT(d->hwirq); + rcar_pci_write_reg(pcie, value, PCIEMSIIER); + } }
static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) @@ -785,7 +775,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) int err;
 	mutex_init(&msi->map_lock);
-	spin_lock_init(&msi->mask_lock);
+	raw_spin_lock_init(&msi->mask_lock);
err = of_address_to_resource(dev->of_node, 0, &res); if (err) diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index bcf4f2ca82b3..3fe2187cdd0b 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -282,17 +282,20 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) if (!epf_test->dma_supported) return;
- dma_release_channel(epf_test->dma_chan_tx); - if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { + if (epf_test->dma_chan_tx) { + dma_release_channel(epf_test->dma_chan_tx); + if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { + epf_test->dma_chan_tx = NULL; + epf_test->dma_chan_rx = NULL; + return; + } epf_test->dma_chan_tx = NULL; - epf_test->dma_chan_rx = NULL; - return; }
- dma_release_channel(epf_test->dma_chan_rx); - epf_test->dma_chan_rx = NULL; - - return; + if (epf_test->dma_chan_rx) { + dma_release_channel(epf_test->dma_chan_rx); + epf_test->dma_chan_rx = NULL; + } }
static void pci_epf_test_print_rate(const char *ops, u64 size, diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index b2e8322755c1..fc2a2627db6b 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -582,15 +582,18 @@ static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs) if (dev->no_vf_scan) return 0;
+ pci_lock_rescan_remove(); for (i = 0; i < num_vfs; i++) { rc = pci_iov_add_virtfn(dev, i); if (rc) goto failed; } + pci_unlock_rescan_remove(); return 0; failed: while (i--) pci_iov_remove_virtfn(dev, i); + pci_unlock_rescan_remove();
return rc; } @@ -710,8 +713,10 @@ static void sriov_del_vfs(struct pci_dev *dev) struct pci_sriov *iov = dev->sriov; int i;
+ pci_lock_rescan_remove(); for (i = 0; i < iov->num_VFs; i++) pci_iov_remove_virtfn(dev, i); + pci_unlock_rescan_remove(); }
static void sriov_disable(struct pci_dev *dev) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 18e973a91a97..c3410241633f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1611,6 +1611,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type) switch (err_type) { case PCI_ERS_RESULT_NONE: case PCI_ERS_RESULT_CAN_RECOVER: + case PCI_ERS_RESULT_NEED_RESET: envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY"; envp[idx++] = "DEVICE_ONLINE=0"; break; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index e1a980179dc1..3d1faaff50c9 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -196,8 +196,14 @@ static ssize_t max_link_width_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + ssize_t ret;
-	return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
+	/* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
+	pci_config_pm_runtime_get(pdev);
+	ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
+	pci_config_pm_runtime_put(pdev);
+
+	return ret;
 }
 static DEVICE_ATTR_RO(max_link_width);
@@ -209,7 +215,10 @@ static ssize_t current_link_speed_show(struct device *dev, int err; enum pci_bus_speed speed;
+	pci_config_pm_runtime_get(pci_dev);
 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+	pci_config_pm_runtime_put(pci_dev);
+
 	if (err)
 		return -EINVAL;
@@ -226,7 +235,10 @@ static ssize_t current_link_width_show(struct device *dev, u16 linkstat; int err;
+ pci_config_pm_runtime_get(pci_dev); err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL;
@@ -242,7 +254,10 @@ static ssize_t secondary_bus_number_show(struct device *dev, u8 sec_bus; int err;
+	pci_config_pm_runtime_get(pci_dev);
 	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
+	pci_config_pm_runtime_put(pci_dev);
+
 	if (err)
 		return -EINVAL;
@@ -258,7 +273,10 @@ static ssize_t subordinate_bus_number_show(struct device *dev, u8 sub_bus; int err;
+ pci_config_pm_runtime_get(pci_dev); err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus); + pci_config_pm_runtime_put(pci_dev); + if (err) return -EINVAL;
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 1b59e6fc7439..866426a8fb86 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -37,7 +37,7 @@ #define AER_ERROR_SOURCES_MAX 128
 #define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
-#define AER_MAX_TYPEOF_UNCOR_ERRS	27	/* as per PCI_ERR_UNCOR_STATUS*/
+#define AER_MAX_TYPEOF_UNCOR_ERRS	32	/* as per PCI_ERR_UNCOR_STATUS*/
struct aer_err_source { unsigned int status; @@ -518,11 +518,11 @@ static const char *aer_uncorrectable_error_string[] = { "AtomicOpBlocked", /* Bit Position 24 */ "TLPBlockedErr", /* Bit Position 25 */ "PoisonTLPBlocked", /* Bit Position 26 */ - NULL, /* Bit Position 27 */ - NULL, /* Bit Position 28 */ - NULL, /* Bit Position 29 */ - NULL, /* Bit Position 30 */ - NULL, /* Bit Position 31 */ + "DMWrReqBlocked", /* Bit Position 27 */ + "IDECheck", /* Bit Position 28 */ + "MisIDETLP", /* Bit Position 29 */ + "PCRC_CHECK", /* Bit Position 30 */ + "TLPXlatBlocked", /* Bit Position 31 */ };
static const char *aer_agent_string[] = { diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 705893b5f7b0..197191178cdd 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -108,6 +108,12 @@ static int report_normal_detected(struct pci_dev *dev, void *data) return report_error_detected(dev, pci_channel_io_normal, data); }
+static int report_perm_failure_detected(struct pci_dev *dev, void *data) +{ + pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT); + return 0; +} + static int report_mmio_enabled(struct pci_dev *dev, void *data) { struct pci_driver *pdrv; @@ -275,7 +281,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, failed: pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
-	pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
+	pci_walk_bridge(bridge, report_perm_failure_detected, NULL);
/* TODO: Should kernel panic here? */ pci_info(bridge, "device recovery failed\n"); diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c index 4fed74511931..ba958dc8d199 100644 --- a/drivers/power/supply/max77976_charger.c +++ b/drivers/power/supply/max77976_charger.c @@ -292,10 +292,10 @@ static int max77976_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_ONLINE: err = max77976_get_online(chg, &val->intval); break; - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: val->intval = MAX77976_CHG_CC_MAX; break; - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: err = max77976_get_integer(chg, CHG_CC, MAX77976_CHG_CC_MIN, MAX77976_CHG_CC_MAX, @@ -330,7 +330,7 @@ static int max77976_set_property(struct power_supply *psy, int err = 0;
switch (psp) { - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: err = max77976_set_integer(chg, CHG_CC, MAX77976_CHG_CC_MIN, MAX77976_CHG_CC_MAX, @@ -355,7 +355,7 @@ static int max77976_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { switch (psp) { - case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: return true; default: @@ -368,8 +368,8 @@ static enum power_supply_property max77976_psy_props[] = { POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_ONLINE, - POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, - POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index e157273fd2f7..c8a586e02cc6 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -274,7 +274,7 @@ static int berlin_pwm_suspend(struct device *dev) if (!channel) continue;
-		channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
+		channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_EN);
 		channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL);
 		channel->duty = berlin_pwm_readl(bpc, i, BERLIN_PWM_DUTY);
 		channel->tcnt = berlin_pwm_readl(bpc, i, BERLIN_PWM_TCNT);
@@ -305,7 +305,7 @@ static int berlin_pwm_resume(struct device *dev)
 		berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
 		berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY);
 		berlin_pwm_writel(bpc, i, channel->tcnt, BERLIN_PWM_TCNT);
-		berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_ENABLE);
+		berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_EN);
 	}
return 0; diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 04ce689dae92..6cfeaed49d07 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -443,6 +443,29 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) else err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+ /* + * Check for potential race described above. If the waiting for next + * second, and the second just ticked since the check above, either + * + * 1) It ticked after the alarm was set, and an alarm irq should be + * generated. + * + * 2) It ticked before the alarm was set, and alarm irq most likely will + * not be generated. + * + * While we cannot easily check for which of these two scenarios we + * are in, we can return -ETIME to signal that the timer has already + * expired, which is true in both cases. + */ + if ((scheduled - now) <= 1) { + err = __rtc_read_time(rtc, &tm); + if (err) + return err; + now = rtc_tm_to_time64(&tm); + if (scheduled <= now) + return -ETIME; + } + trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err); return err; } @@ -594,6 +617,10 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); rtc->uie_rtctimer.period = ktime_set(1, 0); err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); + if (!err && rtc->ops && rtc->ops->alarm_irq_enable) + err = rtc->ops->alarm_irq_enable(rtc->dev.parent, 1); + if (err) + goto out; } else { rtc_timer_remove(rtc, &rtc->uie_rtctimer); } diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c index 9f8b5d4a8f6b..6b77c122fdc1 100644 --- a/drivers/rtc/rtc-optee.c +++ b/drivers/rtc/rtc-optee.c @@ -320,6 +320,7 @@ static int optee_rtc_remove(struct device *dev) { struct optee_rtc *priv = dev_get_drvdata(dev);
+ tee_shm_free(priv->shm); tee_client_close_session(priv->ctx, priv->session_id); tee_client_close_context(priv->ctx);
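For context on the __rtc_set_alarm() change above: an alarm that is already in the past by the time it is programmed is now reported as -ETIME instead of silently never firing. A minimal userspace sketch of how that surfaces through the RTC character device (illustrative only, not from the patch; assumes /dev/rtc0 exists and the driver supports wake alarms):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time now;
	struct rtc_wkalrm alrm;
	struct tm tm;
	time_t t;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &now) < 0)
		return 1;

	/* Re-use the RTC's own notion of "now" and aim one second ahead. */
	memset(&tm, 0, sizeof(tm));
	tm.tm_sec = now.tm_sec;
	tm.tm_min = now.tm_min;
	tm.tm_hour = now.tm_hour;
	tm.tm_mday = now.tm_mday;
	tm.tm_mon = now.tm_mon;
	tm.tm_year = now.tm_year;
	t = timegm(&tm) + 1;
	gmtime_r(&t, &tm);

	memset(&alrm, 0, sizeof(alrm));
	alrm.enabled = 1;
	alrm.time.tm_sec = tm.tm_sec;
	alrm.time.tm_min = tm.tm_min;
	alrm.time.tm_hour = tm.tm_hour;
	alrm.time.tm_mday = tm.tm_mday;
	alrm.time.tm_mon = tm.tm_mon;
	alrm.time.tm_year = tm.tm_year;

	if (ioctl(fd, RTC_WKALM_SET, &alrm) < 0 && errno == ETIME)
		fprintf(stderr, "alarm already expired, retry with a later time\n");

	close(fd);
	return 0;
}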
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c index f587afa84357..6ae7b6f1f316 100644 --- a/drivers/rtc/rtc-x1205.c +++ b/drivers/rtc/rtc-x1205.c @@ -669,7 +669,7 @@ static const struct i2c_device_id x1205_id[] = { MODULE_DEVICE_TABLE(i2c, x1205_id);
static const struct of_device_id x1205_dt_ids[] = { - { .compatible = "xircom,x1205", }, + { .compatible = "xicor,x1205", }, {}, }; MODULE_DEVICE_TABLE(of, x1205_dt_ids); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index e529b3d3eaf3..e685cc262038 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6528,18 +6528,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, while (left) { sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; buff_size[sg_used] = sz; - buff[sg_used] = kmalloc(sz, GFP_KERNEL); - if (buff[sg_used] == NULL) { - status = -ENOMEM; - goto cleanup1; - } + if (ioc->Request.Type.Direction & XFER_WRITE) { - if (copy_from_user(buff[sg_used], data_ptr, sz)) { - status = -EFAULT; + buff[sg_used] = memdup_user(data_ptr, sz); + if (IS_ERR(buff[sg_used])) { + status = PTR_ERR(buff[sg_used]); goto cleanup1; } - } else - memset(buff[sg_used], 0, sz); + } else { + buff[sg_used] = kzalloc(sz, GFP_KERNEL); + if (!buff[sg_used]) { + status = -ENOMEM; + goto cleanup1; + } + } + left -= sz; data_ptr += sz; sg_used++; diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h index 7123a2efbf58..8ef174cd4d37 100644 --- a/drivers/scsi/mvsas/mv_defs.h +++ b/drivers/scsi/mvsas/mv_defs.h @@ -40,6 +40,7 @@ enum driver_configuration { MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ MVS_OAF_SZ = 64, /* Open address frame buffer size */ MVS_QUEUE_SIZE = 64, /* Support Queue depth */ + MVS_RSVD_SLOTS = 4, MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, };
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 2fde496fff5f..b500c343cad7 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -141,8 +141,8 @@ static void mvs_free(struct mvs_info *mvi) if (mvi->shost) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) - cancel_delayed_work(&mwq->work_q); - kfree(mvi->tags); + cancel_delayed_work_sync(&mwq->work_q); + kfree(mvi->rsvd_tags); kfree(mvi); }
@@ -284,10 +284,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name); goto err_out; } - mvi->tags_num = slot_nr;
- /* Initialize tags */ - mvs_tag_init(mvi); return 0; err_out: return 1; @@ -369,8 +366,8 @@ static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev, mvi->sas = sha; mvi->shost = shost;
- mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL); - if (!mvi->tags) + mvi->rsvd_tags = bitmap_zalloc(MVS_RSVD_SLOTS, GFP_KERNEL); + if (!mvi->rsvd_tags) goto err_out;
if (MVS_CHIP_DISP->chip_ioremap(mvi)) @@ -471,6 +468,8 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost, else can_queue = MVS_CHIP_SLOT_SZ;
+ can_queue -= MVS_RSVD_SLOTS; + shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); shost->can_queue = can_queue; mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 1275f3be530f..fd73fcf71bd1 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -20,44 +20,40 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) return 0; }
-void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags; clear_bit(tag, bitmap); }
-void mvs_tag_free(struct mvs_info *mvi, u32 tag) +static void mvs_tag_free(struct mvs_info *mvi, u32 tag) { + if (tag >= MVS_RSVD_SLOTS) + return; + mvs_tag_clear(mvi, tag); }
-void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags; set_bit(tag, bitmap); }
-inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags;
- index = find_first_zero_bit(bitmap, mvi->tags_num); + index = find_first_zero_bit(bitmap, MVS_RSVD_SLOTS); tag = index; - if (tag >= mvi->tags_num) + if (tag >= MVS_RSVD_SLOTS) return -SAS_QUEUE_FULL; mvs_tag_set(mvi, tag); *tag_out = tag; return 0; }
-void mvs_tag_init(struct mvs_info *mvi) -{ - int i; - for (i = 0; i < mvi->tags_num; ++i) - mvs_tag_clear(mvi, i); -} - static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) { unsigned long i = 0, j = 0, hi = 0; @@ -703,6 +699,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf struct mvs_task_exec_info tei; struct mvs_slot_info *slot; u32 tag = 0xdeadbeef, n_elem = 0; + struct request *rq; int rc = 0;
if (!dev->port) { @@ -767,9 +764,14 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf n_elem = task->num_scatter; }
- rc = mvs_tag_alloc(mvi, &tag); - if (rc) - goto err_out; + rq = sas_task_find_rq(task); + if (rq) { + tag = rq->tag + MVS_RSVD_SLOTS; + } else { + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + }
slot = &mvi->slot_info[tag];
@@ -864,7 +866,7 @@ int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags) static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - mvs_tag_clear(mvi, slot_idx); + mvs_tag_free(mvi, slot_idx); }
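A toy userspace model of the tag layout the mvsas changes above introduce (illustrative only, not driver code): internal/TMF commands draw from a small private bitmap of MVS_RSVD_SLOTS tags, while regular commands reuse the block layer request tag shifted up by MVS_RSVD_SLOTS, so the two ranges never collide.

#include <stdio.h>

#define RSVD_SLOTS 4

static unsigned int rsvd_bitmap;

/* Mirrors mvs_tag_alloc(): first free bit in the reserved bitmap. */
static int rsvd_tag_alloc(unsigned int *tag)
{
	for (unsigned int i = 0; i < RSVD_SLOTS; i++) {
		if (!(rsvd_bitmap & (1u << i))) {
			rsvd_bitmap |= 1u << i;
			*tag = i;
			return 0;
		}
	}
	return -1;	/* queue full */
}

/* Mirrors tag = rq->tag + MVS_RSVD_SLOTS for regular commands. */
static unsigned int slot_for_request_tag(unsigned int rq_tag)
{
	return rq_tag + RSVD_SLOTS;
}

int main(void)
{
	unsigned int t;

	if (!rsvd_tag_alloc(&t))
		printf("internal command uses reserved slot %u\n", t);
	printf("block layer tag 0 maps to slot %u\n", slot_for_request_tag(0));
	return 0;
}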
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 509d8f32a04f..68df771e2975 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -370,8 +370,7 @@ struct mvs_info { u32 chip_id; const struct mvs_chip_info *chip;
- int tags_num; - unsigned long *tags; + unsigned long *rsvd_tags; /* further per-slot information */ struct mvs_phy phy[MVS_MAX_PHYS]; struct mvs_port port[MVS_MAX_PHYS]; @@ -424,11 +423,6 @@ struct mvs_task_exec_info {
/******************** function prototype *********************/ void mvs_get_sas_addr(void *buf, u32 buflen); -void mvs_tag_clear(struct mvs_info *mvi, u32 tag); -void mvs_tag_free(struct mvs_info *mvi, u32 tag); -void mvs_tag_set(struct mvs_info *mvi, unsigned int tag); -int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out); -void mvs_tag_init(struct mvs_info *mvi); void mvs_iounmap(void __iomem *regs); int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 02c5f00c8a57..3200e55136cd 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -655,6 +655,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata, reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; reg |= (op->addr.nbytes - 1); writel(reg, reg_base + CQSPI_REG_SIZE); + readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ return 0; }
@@ -694,6 +695,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, reinit_completion(&cqspi->transfer_complete); writel(CQSPI_REG_INDIRECTRD_START_MASK, reg_base + CQSPI_REG_INDIRECTRD); + readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */
while (remaining > 0) { if (!wait_for_completion_timeout(&cqspi->transfer_complete, @@ -943,6 +945,7 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; reg |= (op->addr.nbytes - 1); writel(reg, reg_base + CQSPI_REG_SIZE); + readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ return 0; }
@@ -968,6 +971,8 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata, reinit_completion(&cqspi->transfer_complete); writel(CQSPI_REG_INDIRECTWR_START_MASK, reg_base + CQSPI_REG_INDIRECTWR); + readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */ + /* * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access * Controller programming sequence, couple of cycles of diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 96b96516c980..9052a4873bca 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -1332,10 +1332,11 @@ static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) { struct evtchn_status status; evtchn_port_t port; - int rc = -ENOENT;
memset(&status, 0, sizeof(status)); for (port = 0; port < xen_evtchn_max_channels(); port++) { + int rc; + status.dom = DOMID_SELF; status.port = port; rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); @@ -1345,10 +1346,10 @@ static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) continue; if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { *evtchn = port; - break; + return 0; } } - return rc; + return -ENOENT; }
/** @@ -1806,9 +1807,20 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu) * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. */ - if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) + if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) { + int old_cpu = info->cpu; + bind_evtchn_to_cpu(evtchn, tcpu, false);
+ if (info->type == IRQT_VIRQ) { + int virq = info->u.virq; + int irq = per_cpu(virq_to_irq, old_cpu)[virq]; + + per_cpu(virq_to_irq, old_cpu)[virq] = -1; + per_cpu(virq_to_irq, tcpu)[virq] = irq; + } + } + do_unmask(info, EVT_MASK_REASON_TEMPORARY);
return 0; diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index c16df629907e..55537b673990 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -116,7 +116,7 @@ static void do_suspend(void) err = dpm_suspend_start(PMSG_FREEZE); if (err) { pr_err("%s: dpm_suspend_start %d\n", __func__, err); - goto out_thaw; + goto out_resume_end; }
printk(KERN_DEBUG "suspending xenstore...\n"); @@ -156,6 +156,7 @@ static void do_suspend(void) else xs_suspend_cancel();
+out_resume_end: dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
out_thaw: diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 58b0f04d7123..6f81708658ae 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -22,7 +22,11 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, int type;
if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) { - *max_len = BTRFS_FID_SIZE_CONNECTABLE; + if (btrfs_root_id(BTRFS_I(inode)->root) != + btrfs_root_id(BTRFS_I(parent)->root)) + *max_len = BTRFS_FID_SIZE_CONNECTABLE_ROOT; + else + *max_len = BTRFS_FID_SIZE_CONNECTABLE; return FILEID_INVALID; } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) { *max_len = BTRFS_FID_SIZE_NON_CONNECTABLE; @@ -44,6 +48,8 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, parent_root_id = BTRFS_I(parent)->root->root_key.objectid;
if (parent_root_id != fid->root_objectid) { + if (*max_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) + return FILEID_INVALID; fid->parent_root_objectid = parent_root_id; len = BTRFS_FID_SIZE_CONNECTABLE_ROOT; type = FILEID_BTRFS_WITH_PARENT_ROOT; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 516d40f707a6..8d66a6858cd2 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -415,6 +415,13 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, /* step one, find a bunch of delalloc bytes starting at start */ delalloc_start = *start; delalloc_end = 0; + + /* + * If @max_bytes is smaller than a block, btrfs_find_delalloc_range() can + * return early without handling any dirty ranges. + */ + ASSERT(max_bytes >= fs_info->sectorsize); + found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end, max_bytes, &cached_state); if (!found || delalloc_end <= *start || delalloc_start > orig_end) { @@ -445,13 +452,14 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, delalloc_start, delalloc_end); ASSERT(!ret || ret == -EAGAIN); if (ret == -EAGAIN) { - /* some of the pages are gone, lets avoid looping by - * shortening the size of the delalloc range we're searching + /* + * Some of the pages are gone, lets avoid looping by + * shortening the size of the delalloc range we're searching. */ free_extent_state(cached_state); cached_state = NULL; if (!loops) { - max_bytes = PAGE_SIZE; + max_bytes = fs_info->sectorsize; loops = 1; goto again; } else { diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 6dae27d6f553..9979187a4b3c 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -117,9 +117,18 @@ static struct inode *get_cramfs_inode(struct super_block *sb, inode_nohighmem(inode); inode->i_data.a_ops = &cramfs_aops; break; - default: + case S_IFCHR: + case S_IFBLK: + case S_IFIFO: + case S_IFSOCK: init_special_inode(inode, cramfs_inode->mode, old_decode_dev(cramfs_inode->size)); + break; + default: + printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n", + inode->i_mode, inode->i_ino); + iget_failed(inode); + return ERR_PTR(-EIO); }
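Returning to the btrfs_encode_fh() change above: when the caller's buffer is too small, ->encode_fh() reports the size it actually needs and returns FILEID_INVALID, and the fix makes that reported size account for the larger connectable-root handle when inode and parent live in different subvolumes. The same size-negotiation contract is what userspace sees as EOVERFLOW from name_to_handle_at(2); a minimal sketch of the usual retry loop (generic, not btrfs-specific):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct file_handle *fh;
	int mount_id;

	/* First call with handle_bytes == 0 only reports the required size. */
	fh = malloc(sizeof(*fh));
	if (!fh)
		return 1;
	fh->handle_bytes = 0;
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) != -1 ||
	    errno != EOVERFLOW) {
		perror("name_to_handle_at");
		return 1;
	}

	/* handle_bytes now holds the size the filesystem asked for. */
	fh = realloc(fh, sizeof(*fh) + fh->handle_bytes);
	if (!fh || name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == -1) {
		perror("name_to_handle_at");
		return 1;
	}
	printf("handle: %u bytes, type %d\n", fh->handle_bytes, fh->handle_type);
	free(fh);
	return 0;
}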
inode->i_mode = cramfs_inode->mode; diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c index 1b68586f73f3..c970f41c5048 100644 --- a/fs/ext4/fsmap.c +++ b/fs/ext4/fsmap.c @@ -74,7 +74,8 @@ static int ext4_getfsmap_dev_compare(const void *p1, const void *p2) static bool ext4_getfsmap_rec_before_low_key(struct ext4_getfsmap_info *info, struct ext4_fsmap *rec) { - return rec->fmr_physical < info->gfi_low.fmr_physical; + return rec->fmr_physical + rec->fmr_length <= + info->gfi_low.fmr_physical; }
/* @@ -200,15 +201,18 @@ static int ext4_getfsmap_meta_helper(struct super_block *sb, ext4_group_first_block_no(sb, agno)); fs_end = fs_start + EXT4_C2B(sbi, len);
- /* Return relevant extents from the meta_list */ + /* + * Return relevant extents from the meta_list. We emit all extents that + * partially/fully overlap with the query range + */ list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) { - if (p->fmr_physical < info->gfi_next_fsblk) { + if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) { list_del(&p->fmr_list); kfree(p); continue; } - if (p->fmr_physical <= fs_start || - p->fmr_physical + p->fmr_length <= fs_end) { + if (p->fmr_physical <= fs_end && + p->fmr_physical + p->fmr_length > fs_start) { /* Emit the retained free extent record if present */ if (info->gfi_lastfree.fmr_owner) { error = ext4_getfsmap_helper(sb, info, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 632842f06c12..1a7f8f9af08d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3945,7 +3945,11 @@ int ext4_can_truncate(struct inode *inode) * We have to make sure i_disksize gets properly updated before we truncate * page cache due to hole punching or zero range. Otherwise i_disksize update * can get lost as it may have been postponed to submission of writeback but - * that will never happen after we truncate page cache. + * that will never happen if we remove the folio containing i_size from the + * page cache. Also if we punch hole within i_size but above i_disksize, + * following ext4_page_mkwrite() may mistakenly allocate written blocks over + * the hole and thus introduce allocated blocks beyond i_disksize which is + * not allowed (e2fsck would complain in case of crash). */ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, loff_t len) @@ -3956,9 +3960,11 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, loff_t size = i_size_read(inode);
WARN_ON(!inode_is_locked(inode)); - if (offset > size || offset + len < size) + if (offset > size) return 0;
+ if (offset + len < size) + size = offset + len; if (EXT4_I(inode)->i_disksize >= size) return 0;
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c index c53918768cb2..05997b4d0120 100644 --- a/fs/ext4/orphan.c +++ b/fs/ext4/orphan.c @@ -513,7 +513,7 @@ void ext4_release_orphan_info(struct super_block *sb) return; for (i = 0; i < oi->of_blocks; i++) brelse(oi->of_binfo[i].ob_bh); - kfree(oi->of_binfo); + kvfree(oi->of_binfo); }
static struct ext4_orphan_block_tail *ext4_orphan_block_tail( @@ -584,9 +584,20 @@ int ext4_init_orphan_info(struct super_block *sb) ext4_msg(sb, KERN_ERR, "get orphan inode failed"); return PTR_ERR(inode); } + /* + * This is just an artificial limit to prevent corrupted fs from + * consuming absurd amounts of memory when pinning blocks of orphan + * file in memory. + */ + if (inode->i_size > 8 << 20) { + ext4_msg(sb, KERN_ERR, "orphan file too big: %llu", + (unsigned long long)inode->i_size); + ret = -EFSCORRUPTED; + goto out_put; + } oi->of_blocks = inode->i_size >> sb->s_blocksize_bits; oi->of_csum_seed = EXT4_I(inode)->i_csum_seed; - oi->of_binfo = kmalloc_array(oi->of_blocks, + oi->of_binfo = kvmalloc_array(oi->of_blocks, sizeof(struct ext4_orphan_block), GFP_KERNEL); if (!oi->of_binfo) { @@ -627,7 +638,7 @@ int ext4_init_orphan_info(struct super_block *sb) out_free: for (i--; i >= 0; i--) brelse(oi->of_binfo[i].ob_bh); - kfree(oi->of_binfo); + kvfree(oi->of_binfo); out_put: iput(inode); return ret; diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 95dbc7c9843b..e6de83037d62 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -987,7 +987,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, int ref_change) { struct ext4_iloc iloc; - s64 ref_count; + u64 ref_count; int ret;
inode_lock_nested(ea_inode, I_MUTEX_XATTR); @@ -997,13 +997,17 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, goto out;
ref_count = ext4_xattr_inode_get_ref(ea_inode); + if ((ref_count == 0 && ref_change < 0) || (ref_count == U64_MAX && ref_change > 0)) { + ext4_error_inode(ea_inode, __func__, __LINE__, 0, + "EA inode %lu ref wraparound: ref_count=%lld ref_change=%d", + ea_inode->i_ino, ref_count, ref_change); + ret = -EFSCORRUPTED; + goto out; + } ref_count += ref_change; ext4_xattr_inode_set_ref(ea_inode, ref_count);
if (ref_change > 0) { - WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld", - ea_inode->i_ino, ref_count); - if (ref_count == 1) { WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u", ea_inode->i_ino, ea_inode->i_nlink); @@ -1012,9 +1016,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, ext4_orphan_del(handle, ea_inode); } } else { - WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld", - ea_inode->i_ino, ref_count); - if (ref_count == 0) { WARN_ONCE(ea_inode->i_nlink != 1, "EA inode %lu i_nlink=%u", diff --git a/fs/file.c b/fs/file.c index e921e6072ada..d7ecea84d593 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1136,7 +1136,10 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags) err = expand_files(files, fd); if (unlikely(err < 0)) goto out_unlock; - return do_dup2(files, file, fd, flags); + err = do_dup2(files, file, fd, flags); + if (err < 0) + return err; + return 0;
out_unlock: spin_unlock(&files->file_lock); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 41f8ae8a416f..75e8c102c5ee 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -420,22 +420,23 @@ static bool inode_do_switch_wbs(struct inode *inode, * Transfer to @new_wb's IO list if necessary. If the @inode is dirty, * the specific list @inode was on is ignored and the @inode is put on * ->b_dirty which is always correct including from ->b_dirty_time. - * The transfer preserves @inode->dirtied_when ordering. If the @inode - * was clean, it means it was on the b_attached list, so move it onto - * the b_attached list of @new_wb. + * If the @inode was clean, it means it was on the b_attached list, so + * move it onto the b_attached list of @new_wb. */ if (!list_empty(&inode->i_io_list)) { inode->i_wb = new_wb;
if (inode->i_state & I_DIRTY_ALL) { - struct inode *pos; - - list_for_each_entry(pos, &new_wb->b_dirty, i_io_list) - if (time_after_eq(inode->dirtied_when, - pos->dirtied_when)) - break; + /* + * We need to keep b_dirty list sorted by + * dirtied_time_when. However properly sorting the + * inode in the list gets too expensive when switching + * many inodes. So just attach inode at the end of the + * dirty list and clobber the dirtied_time_when. + */ + inode->dirtied_time_when = jiffies; inode_io_list_move_locked(inode, new_wb, - pos->i_io_list.prev); + &new_wb->b_dirty); } else { inode_cgwb_move_to_attached(inode, new_wb); } @@ -477,6 +478,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) */ down_read(&bdi->wb_switch_rwsem);
+ inodep = isw->inodes; /* * By the time control reaches here, RCU grace period has passed * since I_WB_SWITCH assertion and all wb stat update transactions @@ -487,6 +489,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) * gives us exclusion against all wb related operations on @inode * including IO list manipulations and stat updates. */ +relock: if (old_wb < new_wb) { spin_lock(&old_wb->list_lock); spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING); @@ -495,10 +498,17 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING); }
- for (inodep = isw->inodes; *inodep; inodep++) { + while (*inodep) { WARN_ON_ONCE((*inodep)->i_wb != old_wb); if (inode_do_switch_wbs(*inodep, old_wb, new_wb)) nr_switched++; + inodep++; + if (*inodep && need_resched()) { + spin_unlock(&new_wb->list_lock); + spin_unlock(&old_wb->list_lock); + cond_resched(); + goto relock; + } }
spin_unlock(&new_wb->list_lock); diff --git a/fs/fsopen.c b/fs/fsopen.c index fc9d2d9fd234..25689eaef511 100644 --- a/fs/fsopen.c +++ b/fs/fsopen.c @@ -18,50 +18,56 @@ #include "internal.h" #include "mount.h"
+static inline const char *fetch_message_locked(struct fc_log *log, size_t len, + bool *need_free) +{ + const char *p; + int index; + + if (unlikely(log->head == log->tail)) + return ERR_PTR(-ENODATA); + + index = log->tail & (ARRAY_SIZE(log->buffer) - 1); + p = log->buffer[index]; + if (unlikely(strlen(p) > len)) + return ERR_PTR(-EMSGSIZE); + + log->buffer[index] = NULL; + *need_free = log->need_free & (1 << index); + log->need_free &= ~(1 << index); + log->tail++; + + return p; +} + /* * Allow the user to read back any error, warning or informational messages. + * Only one message is returned for each read(2) call. */ static ssize_t fscontext_read(struct file *file, char __user *_buf, size_t len, loff_t *pos) { struct fs_context *fc = file->private_data; - struct fc_log *log = fc->log.log; - unsigned int logsize = ARRAY_SIZE(log->buffer); - ssize_t ret; - char *p; + ssize_t err; + const char *p __free(kfree) = NULL, *message; bool need_free; - int index, n; + int n;
- ret = mutex_lock_interruptible(&fc->uapi_mutex); - if (ret < 0) - return ret; - - if (log->head == log->tail) { - mutex_unlock(&fc->uapi_mutex); - return -ENODATA; - } - - index = log->tail & (logsize - 1); - p = log->buffer[index]; - need_free = log->need_free & (1 << index); - log->buffer[index] = NULL; - log->need_free &= ~(1 << index); - log->tail++; + err = mutex_lock_interruptible(&fc->uapi_mutex); + if (err < 0) + return err; + message = fetch_message_locked(fc->log.log, len, &need_free); mutex_unlock(&fc->uapi_mutex); + if (IS_ERR(message)) + return PTR_ERR(message);
- ret = -EMSGSIZE; - n = strlen(p); - if (n > len) - goto err_free; - ret = -EFAULT; - if (copy_to_user(_buf, p, n) != 0) - goto err_free; - ret = n; - -err_free: if (need_free) - kfree(p); - return ret; + p = message; + + n = strlen(message); + if (copy_to_user(_buf, message, n)) + return -EFAULT; + return n; }
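A small userspace sketch of the interface the fscontext_read() rework above serves: every read(2) on an fsopen(2) fd drains exactly one logged message. Illustrative only; it assumes a kernel with fsopen(2)/fsconfig(2) and the FSOPEN_*/FSCONFIG_* constants from <linux/mount.h>, and uses syscall() since glibc provides no wrappers.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

int main(void)
{
	char msg[512];
	ssize_t n;
	int fd = syscall(SYS_fsopen, "tmpfs", FSOPEN_CLOEXEC);

	if (fd < 0)
		return 1;

	/* Provoke a log entry with a parameter tmpfs does not understand. */
	syscall(SYS_fsconfig, fd, FSCONFIG_SET_FLAG, "no_such_option", NULL, 0);

	/* One message per read(2); the read fails with ENODATA once drained. */
	while ((n = read(fd, msg, sizeof(msg) - 1)) > 0) {
		msg[n] = '\0';
		printf("%s\n", msg);
	}

	close(fd);
	return 0;
}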
static int fscontext_release(struct inode *inode, struct file *file) diff --git a/fs/minix/inode.c b/fs/minix/inode.c index da8bdd1712a7..9da6903e306c 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -470,8 +470,14 @@ void minix_set_inode(struct inode *inode, dev_t rdev) inode->i_op = &minix_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &minix_aops; - } else + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { init_special_inode(inode, inode->i_mode, rdev); + } else { + printk(KERN_DEBUG "MINIX-fs: Invalid file type 0%04o for inode %lu.\n", + inode->i_mode, inode->i_ino); + make_bad_inode(inode); + } }
/* diff --git a/fs/namei.c b/fs/namei.c index 6ce07cde1c27..c8fe0bbf2611 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1360,6 +1360,10 @@ static int follow_automount(struct path *path, int *count, unsigned lookup_flags dentry->d_inode) return -EISDIR;
+ /* No need to trigger automounts if mountpoint crossing is disabled. */ + if (lookup_flags & LOOKUP_NO_XDEV) + return -EXDEV; + if (count && (*count)++ >= MAXSYMLINKS) return -ELOOP;
@@ -1383,6 +1387,10 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped, /* Allow the filesystem to manage the transit without i_mutex * being held. */ if (flags & DCACHE_MANAGE_TRANSIT) { + if (lookup_flags & LOOKUP_NO_XDEV) { + ret = -EXDEV; + break; + } ret = path->dentry->d_op->d_manage(path, false); flags = smp_load_acquire(&path->dentry->d_flags); if (ret < 0) diff --git a/fs/namespace.c b/fs/namespace.c index 2a76269f2a4e..f22f76d9c22f 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -64,6 +64,15 @@ static int __init set_mphash_entries(char *str) } __setup("mphash_entries=", set_mphash_entries);
+static char * __initdata initramfs_options; +static int __init initramfs_options_setup(char *str) +{ + initramfs_options = str; + return 1; +} + +__setup("initramfs_options=", initramfs_options_setup); + static u64 event; static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_group_ida); @@ -4414,7 +4423,7 @@ static void __init init_mount_tree(void) struct mnt_namespace *ns; struct path root;
- mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL); + mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options); if (IS_ERR(mnt)) panic("Can't create rootfs");
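The new initramfs_options= parameter above is passed verbatim to the rootfs mount. For example (assuming the configuration where rootfs is backed by tmpfs), booting with something like

    initramfs_options=size=75%

would raise the default tmpfs size cap (normally half of RAM) for the initramfs.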
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c index 46a7f9b813e5..b02886f38925 100644 --- a/fs/nfsd/lockd.c +++ b/fs/nfsd/lockd.c @@ -48,6 +48,21 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp, switch (nfserr) { case nfs_ok: return 0; + case nfserr_jukebox: + /* this error can indicate a presence of a conflicting + * delegation to an NLM lock request. Options are: + * (1) For now, drop this request and make the client + * retry. When delegation is returned, client's lock retry + * will complete. + * (2) NLM4_DENIED as per "spec" signals to the client + * that the lock is unavailable now but client can retry. + * Linux client implementation does not. It treats + * NLM4_DENIED same as NLM4_FAILED and errors the request. + * (3) For the future, treat this as blocked lock and try + * to callback when the delegation is returned but might + * not have a proper lock request to block on. + */ + fallthrough; case nfserr_dropit: return nlm_drop_reply; case nfserr_stale: diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index a5e017d94215..f9354c3713ac 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1334,7 +1334,7 @@ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr, return 0; } if (work) { - strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1); + strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr)); refcount_set(&work->nsui_refcnt, 2); work->nsui_busy = true; list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list); diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c index 8dbd8e70c295..57aee52ff999 100644 --- a/fs/ntfs3/bitmap.c +++ b/fs/ntfs3/bitmap.c @@ -1374,6 +1374,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits) mark_buffer_dirty(bh); unlock_buffer(bh); /* err = sync_dirty_buffer(bh); */ + put_bh(bh);
b0 = 0; bits -= op; diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h index 4464a62228cf..d3c0b985eb8c 100644 --- a/fs/smb/server/ksmbd_netlink.h +++ b/fs/smb/server/ksmbd_netlink.h @@ -107,10 +107,11 @@ struct ksmbd_startup_request { __u32 smb2_max_credits; /* MAX credits */ __u32 smbd_max_io_size; /* smbd read write size */ __u32 max_connections; /* Number of maximum simultaneous connections */ - __u32 reserved[126]; /* Reserved room */ + __u32 max_ip_connections; /* Number of maximum connection per ip address */ + __u32 reserved[125]; /* Reserved room */ __u32 ifc_list_sz; /* interfaces list size */ __s8 ____payload[]; -}; +} __packed;
#define KSMBD_STARTUP_CONFIG_INTERFACES(s) ((s)->____payload)
diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h index db7278181760..2cb1b855a39e 100644 --- a/fs/smb/server/server.h +++ b/fs/smb/server/server.h @@ -42,6 +42,7 @@ struct ksmbd_server_config { struct smb_sid domain_sid; unsigned int auth_mechs; unsigned int max_connections; + unsigned int max_ip_connections;
char *conf[SERVER_CONF_WORK_GROUP + 1]; }; diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c index 7fc4b33b89e3..3ca820d0b8d6 100644 --- a/fs/smb/server/transport_ipc.c +++ b/fs/smb/server/transport_ipc.c @@ -318,6 +318,9 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) if (req->max_connections) server_conf.max_connections = req->max_connections;
+ if (req->max_ip_connections) + server_conf.max_ip_connections = req->max_ip_connections; + ret = ksmbd_set_netbios_name(req->netbios_name); ret |= ksmbd_set_server_string(req->server_string); ret |= ksmbd_set_work_group(req->work_group); diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c index a4e7d1a5d64d..4ef032e737f3 100644 --- a/fs/smb/server/transport_tcp.c +++ b/fs/smb/server/transport_tcp.c @@ -236,6 +236,7 @@ static int ksmbd_kthread_fn(void *p) struct interface *iface = (struct interface *)p; struct ksmbd_conn *conn; int ret; + unsigned int max_ip_conns;
while (!kthread_should_stop()) { mutex_lock(&iface->sock_release_lock); @@ -253,34 +254,38 @@ static int ksmbd_kthread_fn(void *p) continue; }
+ if (!server_conf.max_ip_connections) + goto skip_max_ip_conns_limit; + /* * Limits repeated connections from clients with the same IP. */ + max_ip_conns = 0; down_read(&conn_list_lock); - list_for_each_entry(conn, &conn_list, conns_list) + list_for_each_entry(conn, &conn_list, conns_list) { #if IS_ENABLED(CONFIG_IPV6) if (client_sk->sk->sk_family == AF_INET6) { if (memcmp(&client_sk->sk->sk_v6_daddr, - &conn->inet6_addr, 16) == 0) { - ret = -EAGAIN; - break; - } + &conn->inet6_addr, 16) == 0) + max_ip_conns++; } else if (inet_sk(client_sk->sk)->inet_daddr == - conn->inet_addr) { - ret = -EAGAIN; - break; - } + conn->inet_addr) + max_ip_conns++; #else if (inet_sk(client_sk->sk)->inet_daddr == - conn->inet_addr) { + conn->inet_addr) + max_ip_conns++; +#endif + if (server_conf.max_ip_connections <= max_ip_conns) { ret = -EAGAIN; break; } -#endif + } up_read(&conn_list_lock); if (ret == -EAGAIN) continue;
+skip_max_ip_conns_limit: if (server_conf.max_connections && atomic_inc_return(&active_num_conn) >= server_conf.max_connections) { pr_info_ratelimited("Limit the maximum number of connections(%u)\n", diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c index c381d08c30c2..42c97c68db63 100644 --- a/fs/squashfs/inode.c +++ b/fs/squashfs/inode.c @@ -140,8 +140,17 @@ int squashfs_read_inode(struct inode *inode, long long ino) if (err < 0) goto failed_read;
+ inode->i_size = le32_to_cpu(sqsh_ino->file_size); frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { + /* + * the file cannot have a fragment (tailend) and have a + * file size a multiple of the block size + */ + if ((inode->i_size & (msblk->block_size - 1)) == 0) { + err = -EINVAL; + goto failed_read; + } frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { @@ -155,7 +164,6 @@ int squashfs_read_inode(struct inode *inode, long long ino) }
set_nlink(inode, 1); - inode->i_size = le32_to_cpu(sqsh_ino->file_size); inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; inode->i_blocks = ((inode->i_size - 1) >> 9) + 1; @@ -184,8 +192,21 @@ int squashfs_read_inode(struct inode *inode, long long ino) if (err < 0) goto failed_read;
+ inode->i_size = le64_to_cpu(sqsh_ino->file_size); + if (inode->i_size < 0) { + err = -EINVAL; + goto failed_read; + } frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { + /* + * the file cannot have a fragment (tailend) and have a + * file size a multiple of the block size + */ + if ((inode->i_size & (msblk->block_size - 1)) == 0) { + err = -EINVAL; + goto failed_read; + } frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { @@ -200,7 +221,6 @@ int squashfs_read_inode(struct inode *inode, long long ino)
xattr_id = le32_to_cpu(sqsh_ino->xattr); set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); - inode->i_size = le64_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_inode_ops; inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 911bab2998e2..413153f3aa4f 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -213,6 +213,12 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0); */ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
+/* + * ACPI Global Lock is mainly used for systems with SMM, so no-SMM systems + * (such as loong_arch) may not have and not use Global Lock. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_use_global_lock, TRUE); + /* * Maximum timeout for While() loop iterations before forced method abort. * This mechanism is intended to prevent infinite loops during interpreter diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index cde032f86856..b5c5aa25bb8b 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -74,30 +74,32 @@ #if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__)) #include <linux/tracepoint-defs.h>
+#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint) DECLARE_TRACEPOINT(rwmmio_write); DECLARE_TRACEPOINT(rwmmio_post_write); DECLARE_TRACEPOINT(rwmmio_read); DECLARE_TRACEPOINT(rwmmio_post_read);
void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr); + unsigned long caller_addr, unsigned long caller_addr0); void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr); + unsigned long caller_addr, unsigned long caller_addr0); void log_read_mmio(u8 width, const volatile void __iomem *addr, - unsigned long caller_addr); + unsigned long caller_addr, unsigned long caller_addr0); void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, - unsigned long caller_addr); + unsigned long caller_addr, unsigned long caller_addr0);
#else
+#define rwmmio_tracepoint_enabled(tracepoint) false static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr) {} + unsigned long caller_addr, unsigned long caller_addr0) {} static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr) {} + unsigned long caller_addr, unsigned long caller_addr0) {} static inline void log_read_mmio(u8 width, const volatile void __iomem *addr, - unsigned long caller_addr) {} + unsigned long caller_addr, unsigned long caller_addr0) {} static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, - unsigned long caller_addr) {} + unsigned long caller_addr, unsigned long caller_addr0) {}
#endif /* CONFIG_TRACE_MMIO_ACCESS */
@@ -188,11 +190,13 @@ static inline u8 readb(const volatile void __iomem *addr) { u8 val;
- log_read_mmio(8, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __raw_readb(addr); __io_ar(val); - log_post_read_mmio(val, 8, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -203,11 +207,13 @@ static inline u16 readw(const volatile void __iomem *addr) { u16 val;
- log_read_mmio(16, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); __io_ar(val); - log_post_read_mmio(val, 16, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -218,11 +224,13 @@ static inline u32 readl(const volatile void __iomem *addr) { u32 val;
- log_read_mmio(32, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); __io_ar(val); - log_post_read_mmio(val, 32, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -234,11 +242,13 @@ static inline u64 readq(const volatile void __iomem *addr) { u64 val;
- log_read_mmio(64, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); __io_ar(val); - log_post_read_mmio(val, 64, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -248,11 +258,13 @@ static inline u64 readq(const volatile void __iomem *addr) #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { - log_write_mmio(value, 8, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writeb(value, addr); __io_aw(); - log_post_write_mmio(value, 8, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); } #endif
@@ -260,11 +272,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr) #define writew writew static inline void writew(u16 value, volatile void __iomem *addr) { - log_write_mmio(value, 16, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writew((u16 __force)cpu_to_le16(value), addr); __io_aw(); - log_post_write_mmio(value, 16, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); } #endif
@@ -272,11 +286,13 @@ static inline void writew(u16 value, volatile void __iomem *addr) #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { - log_write_mmio(value, 32, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writel((u32 __force)__cpu_to_le32(value), addr); __io_aw(); - log_post_write_mmio(value, 32, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); } #endif
@@ -285,11 +301,13 @@ static inline void writel(u32 value, volatile void __iomem *addr) #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { - log_write_mmio(value, 64, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writeq((u64 __force)__cpu_to_le64(value), addr); __io_aw(); - log_post_write_mmio(value, 64, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); } #endif #endif /* CONFIG_64BIT */ @@ -305,9 +323,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr) { u8 val;
- log_read_mmio(8, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); val = __raw_readb(addr); - log_post_read_mmio(val, 8, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -318,9 +338,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr) { u16 val;
- log_read_mmio(16, addr, _THIS_IP_); - val = __le16_to_cpu(__raw_readw(addr)); - log_post_read_mmio(val, 16, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); + val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -331,9 +353,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr) { u32 val;
- log_read_mmio(32, addr, _THIS_IP_); - val = __le32_to_cpu(__raw_readl(addr)); - log_post_read_mmio(val, 32, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); + val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -344,9 +368,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) { u64 val;
- log_read_mmio(64, addr, _THIS_IP_); - val = __le64_to_cpu(__raw_readq(addr)); - log_post_read_mmio(val, 64, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); + val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -355,9 +381,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) #define writeb_relaxed writeb_relaxed static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) { - log_write_mmio(value, 8, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); __raw_writeb(value, addr); - log_post_write_mmio(value, 8, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); } #endif
@@ -365,9 +393,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) #define writew_relaxed writew_relaxed static inline void writew_relaxed(u16 value, volatile void __iomem *addr) { - log_write_mmio(value, 16, addr, _THIS_IP_); - __raw_writew(cpu_to_le16(value), addr); - log_post_write_mmio(value, 16, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + __raw_writew((u16 __force)cpu_to_le16(value), addr); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); } #endif
@@ -375,9 +405,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr) #define writel_relaxed writel_relaxed static inline void writel_relaxed(u32 value, volatile void __iomem *addr) { - log_write_mmio(value, 32, addr, _THIS_IP_); - __raw_writel(__cpu_to_le32(value), addr); - log_post_write_mmio(value, 32, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + __raw_writel((u32 __force)__cpu_to_le32(value), addr); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); } #endif
@@ -385,9 +417,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr) #define writeq_relaxed writeq_relaxed static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) { - log_write_mmio(value, 64, addr, _THIS_IP_); - __raw_writeq(__cpu_to_le64(value), addr); - log_post_write_mmio(value, 64, addr, _THIS_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + __raw_writeq((u64 __force)__cpu_to_le64(value), addr); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); } #endif
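With CONFIG_TRACE_MMIO_ACCESS=y, the rwmmio events now record the extra caller ("%pS -> %pS") after the changes above, and the rwmmio_tracepoint_enabled() checks keep the MMIO accessors cheap while tracing is off. A small sketch for switching the events on from userspace (assumes tracefs is mounted at /sys/kernel/tracing and the kernel was built with the option):

#include <stdio.h>

int main(void)
{
	const char *knob = "/sys/kernel/tracing/events/rwmmio/enable";
	FILE *f = fopen(knob, "w");

	if (!f) {
		perror(knob);
		return 1;
	}
	/* Enables rwmmio_read/rwmmio_write and their post_* counterparts. */
	fputs("1\n", f);
	fclose(f);

	/* Records appear in /sys/kernel/tracing/trace as the events fire. */
	return 0;
}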
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h index de45cf2ee1e4..ce2086f97e3f 100644 --- a/include/linux/iio/frequency/adf4350.h +++ b/include/linux/iio/frequency/adf4350.h @@ -51,7 +51,7 @@
/* REG3 Bit Definitions */ #define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3) -#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16) +#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 15) #define ADF4350_REG3_12BIT_CSR_EN (1 << 18) #define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21) #define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22) diff --git a/include/linux/sched.h b/include/linux/sched.h index 4dc764f3d26f..37ee1b1c9ed5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2313,6 +2313,12 @@ enum rseq_event_mask { RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), };
+#ifdef CONFIG_MEMBARRIER +# define RSEQ_EVENT_GUARD irq +#else +# define RSEQ_EVENT_GUARD preempt +#endif + static inline void rseq_set_notify_resume(struct task_struct *t) { if (t->rseq) @@ -2331,9 +2337,8 @@ static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { - preempt_disable(); - __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); - preempt_enable(); + scoped_guard(RSEQ_EVENT_GUARD) + __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); rseq_handle_notify_resume(ksig, regs); }
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 2f80c9c818ed..0cbac7587000 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -1462,19 +1462,23 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; * * Note: only legacy non-MC drivers may need this macro. */ -#define v4l2_subdev_call_state_try(sd, o, f, args...) \ - ({ \ - int __result; \ - static struct lock_class_key __key; \ - const char *name = KBUILD_BASENAME \ - ":" __stringify(__LINE__) ":state->lock"; \ - struct v4l2_subdev_state *state = \ - __v4l2_subdev_state_alloc(sd, name, &__key); \ - v4l2_subdev_lock_state(state); \ - __result = v4l2_subdev_call(sd, o, f, state, ##args); \ - v4l2_subdev_unlock_state(state); \ - __v4l2_subdev_state_free(state); \ - __result; \ +#define v4l2_subdev_call_state_try(sd, o, f, args...) \ + ({ \ + int __result; \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":state->lock"; \ + struct v4l2_subdev_state *state = \ + __v4l2_subdev_state_alloc(sd, name, &__key); \ + if (IS_ERR(state)) { \ + __result = PTR_ERR(state); \ + } else { \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + v4l2_subdev_unlock_state(state); \ + __v4l2_subdev_state_free(state); \ + } \ + __result; \ })
/** diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 2dbead74a2af..9e9dff75a02b 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -648,6 +648,24 @@ static inline bool sas_is_internal_abort(struct sas_task *task) return task->task_proto == SAS_PROTOCOL_INTERNAL_ABORT; }
+static inline struct request *sas_task_find_rq(struct sas_task *task) +{ + struct scsi_cmnd *scmd; + + if (task->task_proto & SAS_PROTOCOL_STP_ALL) { + struct ata_queued_cmd *qc = task->uldd_task; + + scmd = qc ? qc->scsicmd : NULL; + } else { + scmd = task->uldd_task; + } + + if (!scmd) + return NULL; + + return scsi_cmd_to_rq(scmd); +} + struct sas_domain_function_template { /* The class calls these to notify the LLDD of an event. */ void (*lldd_port_formed)(struct asd_sas_phy *); diff --git a/include/trace/events/rwmmio.h b/include/trace/events/rwmmio.h index de41159216c1..a43e5dd7436b 100644 --- a/include/trace/events/rwmmio.h +++ b/include/trace/events/rwmmio.h @@ -12,12 +12,14 @@
DECLARE_EVENT_CLASS(rwmmio_rw_template,
- TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), + TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width, + volatile void __iomem *addr),
- TP_ARGS(caller, val, width, addr), + TP_ARGS(caller, caller0, val, width, addr),
TP_STRUCT__entry( __field(unsigned long, caller) + __field(unsigned long, caller0) __field(unsigned long, addr) __field(u64, val) __field(u8, width) @@ -25,56 +27,64 @@ DECLARE_EVENT_CLASS(rwmmio_rw_template,
TP_fast_assign( __entry->caller = caller; + __entry->caller0 = caller0; __entry->val = val; __entry->addr = (unsigned long)addr; __entry->width = width; ),
- TP_printk("%pS width=%d val=%#llx addr=%#lx", - (void *)__entry->caller, __entry->width, + TP_printk("%pS -> %pS width=%d val=%#llx addr=%#lx", + (void *)__entry->caller0, (void *)__entry->caller, __entry->width, __entry->val, __entry->addr) );
DEFINE_EVENT(rwmmio_rw_template, rwmmio_write, - TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), - TP_ARGS(caller, val, width, addr) + TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width, + volatile void __iomem *addr), + TP_ARGS(caller, caller0, val, width, addr) );
DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write, - TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), - TP_ARGS(caller, val, width, addr) + TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width, + volatile void __iomem *addr), + TP_ARGS(caller, caller0, val, width, addr) );
TRACE_EVENT(rwmmio_read,
- TP_PROTO(unsigned long caller, u8 width, const volatile void __iomem *addr), + TP_PROTO(unsigned long caller, unsigned long caller0, u8 width, + const volatile void __iomem *addr),
- TP_ARGS(caller, width, addr), + TP_ARGS(caller, caller0, width, addr),
TP_STRUCT__entry( __field(unsigned long, caller) + __field(unsigned long, caller0) __field(unsigned long, addr) __field(u8, width) ),
TP_fast_assign( __entry->caller = caller; + __entry->caller0 = caller0; __entry->addr = (unsigned long)addr; __entry->width = width; ),
- TP_printk("%pS width=%d addr=%#lx", - (void *)__entry->caller, __entry->width, __entry->addr) + TP_printk("%pS -> %pS width=%d addr=%#lx", + (void *)__entry->caller0, (void *)__entry->caller, __entry->width, __entry->addr) );
TRACE_EVENT(rwmmio_post_read,
- TP_PROTO(unsigned long caller, u64 val, u8 width, const volatile void __iomem *addr), + TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width, + const volatile void __iomem *addr),
- TP_ARGS(caller, val, width, addr), + TP_ARGS(caller, caller0, val, width, addr),
TP_STRUCT__entry( __field(unsigned long, caller) + __field(unsigned long, caller0) __field(unsigned long, addr) __field(u64, val) __field(u8, width) @@ -82,13 +92,14 @@ TRACE_EVENT(rwmmio_post_read,
TP_fast_assign( __entry->caller = caller; + __entry->caller0 = caller0; __entry->val = val; __entry->addr = (unsigned long)addr; __entry->width = width; ),
- TP_printk("%pS width=%d val=%#llx addr=%#lx", - (void *)__entry->caller, __entry->width, + TP_printk("%pS -> %pS width=%d val=%#llx addr=%#lx", + (void *)__entry->caller0, (void *)__entry->caller, __entry->width, __entry->val, __entry->addr) );
diff --git a/init/main.c b/init/main.c index e46aa00b3c99..aab33b204f88 100644 --- a/init/main.c +++ b/init/main.c @@ -533,6 +533,12 @@ static int __init unknown_bootoption(char *param, char *val, const char *unused, void *arg) { size_t len = strlen(param); + /* + * Well-known bootloader identifiers: + * 1. LILO/Grub pass "BOOT_IMAGE=..."; + * 2. kexec/kdump (kexec-tools) pass "kexec". + */ + const char *bootloader[] = { "BOOT_IMAGE=", "kexec", NULL };
/* Handle params aliased to sysctls */ if (sysctl_is_alias(param)) @@ -540,6 +546,12 @@ static int __init unknown_bootoption(char *param, char *val,
repair_env_string(param, val);
+ /* Handle bootloader identifier */ + for (int i = 0; bootloader[i]; i++) { + if (strstarts(param, bootloader[i])) + return 0; + } + /* Handle obsolete-style parameters */ if (obsolete_checksetup(param)) return 0; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 4f841e16779e..9b184f3fbf45 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -610,7 +610,7 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root) return 0; }
-static void bpf_free_inode(struct inode *inode) +static void bpf_destroy_inode(struct inode *inode) { enum bpf_type type;
@@ -625,7 +625,7 @@ static const struct super_operations bpf_super_ops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, .show_options = bpf_show_options, - .free_inode = bpf_free_inode, + .destroy_inode = bpf_destroy_inode, };
enum { diff --git a/kernel/fork.c b/kernel/fork.c index da318028aa88..cbd68079c422 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1641,7 +1641,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct *tsk) return 0; }
-static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) +static int copy_sighand(u64 clone_flags, struct task_struct *tsk) { struct sighand_struct *sig;
diff --git a/kernel/pid.c b/kernel/pid.c index 8bce3aebc949..e1d0c9d95227 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -474,7 +474,7 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) struct upid *upid; pid_t nr = 0;
- if (pid && ns->level <= pid->level) { + if (pid && ns && ns->level <= pid->level) { upid = &pid->numbers[ns->level]; if (upid->ns == ns) nr = upid->nr; diff --git a/kernel/rseq.c b/kernel/rseq.c index 840927ac417b..2fea2782e179 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -226,12 +226,12 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
/* * Load and clear event mask atomically with respect to - * scheduler preemption. + * scheduler preemption and membarrier IPIs. */ - preempt_disable(); - event_mask = t->rseq_event_mask; - t->rseq_event_mask = 0; - preempt_enable(); + scoped_guard(RSEQ_EVENT_GUARD) { + event_mask = t->rseq_event_mask; + t->rseq_event_mask = 0; + }
return !!event_mask; } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 7f378fa0b6ed..75fe40c6d080 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2217,6 +2217,25 @@ static int find_later_rq(struct task_struct *task) return -1; }
+static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) +{ + struct task_struct *p; + + if (!has_pushable_dl_tasks(rq)) + return NULL; + + p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); + + WARN_ON_ONCE(rq->cpu != task_cpu(p)); + WARN_ON_ONCE(task_current(rq, p)); + WARN_ON_ONCE(p->nr_cpus_allowed <= 1); + + WARN_ON_ONCE(!task_on_rq_queued(p)); + WARN_ON_ONCE(!dl_task(p)); + + return p; +} + /* Locks the rq it finds */ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) { @@ -2244,12 +2263,37 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
/* Retry if something changed. */ if (double_lock_balance(rq, later_rq)) { - if (unlikely(task_rq(task) != rq || + /* + * double_lock_balance had to release rq->lock, in the + * meantime, task may no longer be fit to be migrated. + * Check the following to ensure that the task is + * still suitable for migration: + * 1. It is possible the task was scheduled, + * migrate_disabled was set and then got preempted, + * so we must check the task migration disable + * flag. + * 2. The CPU picked is in the task's affinity. + * 3. For throttled task (dl_task_offline_migration), + * check the following: + * - the task is not on the rq anymore (it was + * migrated) + * - the task is not on CPU anymore + * - the task is still a dl task + * - the task is not queued on the rq anymore + * 4. For the non-throttled task (push_dl_task), the + * check to ensure that this task is still at the + * head of the pushable tasks list is enough. + */ + if (unlikely(is_migration_disabled(task) || !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) || - task_on_cpu(rq, task) || - !dl_task(task) || - is_migration_disabled(task) || - !task_on_rq_queued(task))) { + (task->dl.dl_throttled && + (task_rq(task) != rq || + task_on_cpu(rq, task) || + !dl_task(task) || + !task_on_rq_queued(task))) || + (!task->dl.dl_throttled && + task != pick_next_pushable_dl_task(rq)))) { + double_unlock_balance(rq, later_rq); later_rq = NULL; break; @@ -2272,25 +2316,6 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) return later_rq; }
-static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) -{ - struct task_struct *p; - - if (!has_pushable_dl_tasks(rq)) - return NULL; - - p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); - - WARN_ON_ONCE(rq->cpu != task_cpu(p)); - WARN_ON_ONCE(task_current(rq, p)); - WARN_ON_ONCE(p->nr_cpus_allowed <= 1); - - WARN_ON_ONCE(!task_on_rq_queued(p)); - WARN_ON_ONCE(!dl_task(p)); - - return p; -} - /* * See if the non running -deadline tasks on this rq * can be sent to some other CPU where they can preempt diff --git a/kernel/sys.c b/kernel/sys.c index 06a9a87a8d3e..3076e6530913 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1682,6 +1682,7 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, struct rlimit old, new; struct task_struct *tsk; unsigned int checkflags = 0; + bool need_tasklist; int ret;
if (old_rlim) @@ -1708,8 +1709,25 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, get_task_struct(tsk); rcu_read_unlock();
- ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, - old_rlim ? &old : NULL); + need_tasklist = !same_thread_group(tsk, current); + if (need_tasklist) { + /* + * Ensure we can't race with group exit or de_thread(), + * so tsk->group_leader can't be freed or changed until + * read_unlock(tasklist_lock) below. + */ + read_lock(&tasklist_lock); + if (!pid_alive(tsk)) + ret = -ESRCH; + } + + if (!ret) { + ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, + old_rlim ? &old : NULL); + } + + if (need_tasklist) + read_unlock(&tasklist_lock);
if (!ret && old_rlim) { rlim_to_rlim64(&old, &old64); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index cc155c411768..a87e9eb7f37b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1715,14 +1715,15 @@ static int kprobe_register(struct trace_event_call *event, static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) { struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); + unsigned int flags = trace_probe_load_flag(&tk->tp); int ret = 0;
raw_cpu_inc(*tk->nhit);
- if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE)) + if (flags & TP_FLAG_TRACE) kprobe_trace_func(tk, regs); #ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) ret = kprobe_perf_func(tk, regs); #endif return ret; @@ -1734,6 +1735,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) { struct kretprobe *rp = get_kretprobe(ri); struct trace_kprobe *tk; + unsigned int flags;
/* * There is a small chance that get_kretprobe(ri) returns NULL when @@ -1746,10 +1748,11 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) tk = container_of(rp, struct trace_kprobe, rp); raw_cpu_inc(*tk->nhit);
- if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE)) + flags = trace_probe_load_flag(&tk->tp); + if (flags & TP_FLAG_TRACE) kretprobe_trace_func(tk, ri, regs); #ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) kretprobe_perf_func(tk, ri, regs); #endif return 0; /* We don't tweak kernel, so just return 0 */ diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index f48b3ed20b09..aab5066667ee 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -257,16 +257,21 @@ struct event_file_link { struct list_head list; };
+static inline unsigned int trace_probe_load_flag(struct trace_probe *tp) +{ + return smp_load_acquire(&tp->event->flags); +} + static inline bool trace_probe_test_flag(struct trace_probe *tp, unsigned int flag) { - return !!(tp->event->flags & flag); + return !!(trace_probe_load_flag(tp) & flag); }
static inline void trace_probe_set_flag(struct trace_probe *tp, unsigned int flag) { - tp->event->flags |= flag; + smp_store_release(&tp->event->flags, tp->event->flags | flag); }
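A rough user-space analogue of the smp_load_acquire()/smp_store_release() pairing introduced above, using C11 atomics (the helper names and the fetch-or are illustrative, not the kernel primitives): the writer publishes its data before setting the flag, and a reader that observes the flag through an acquire load is guaranteed to also see that data. Sampling the flags once per event, as the dispatchers now do, additionally keeps the trace/profile decisions consistent within a single hit.

/* flag_publish.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_READY 0x1u

static int payload;			/* data published by the writer */
static _Atomic unsigned int flags;	/* stands in for tp->event->flags */

static void set_flag(unsigned int flag)
{
	/* everything written before this release store is visible to a
	 * reader that observes the flag via an acquire load */
	atomic_fetch_or_explicit(&flags, flag, memory_order_release);
}

static unsigned int load_flags(void)
{
	return atomic_load_explicit(&flags, memory_order_acquire);
}

static void *writer(void *arg)
{
	(void)arg;
	payload = 42;		/* initialise the event data first ... */
	set_flag(FLAG_READY);	/* ... then publish it */
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, writer, NULL);

	/* sample the flags once and act on that snapshot */
	unsigned int f = load_flags();
	if (f & FLAG_READY)
		printf("payload = %d\n", payload);	/* must print 42 */

	pthread_join(t, NULL);
	return 0;
}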
static inline void trace_probe_clear_flag(struct trace_probe *tp, diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 53ef3cb65098..a930ef372ef4 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1497,6 +1497,7 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) struct trace_uprobe *tu; struct uprobe_dispatch_data udd; struct uprobe_cpu_buffer *ucb; + unsigned int flags; int ret = 0;
tu = container_of(con, struct trace_uprobe, consumer); @@ -1512,11 +1513,12 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
ucb = prepare_uprobe_buffer(tu, regs);
- if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) + flags = trace_probe_load_flag(&tu->tp); + if (flags & TP_FLAG_TRACE) ret |= uprobe_trace_func(tu, regs, ucb);
#ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) ret |= uprobe_perf_func(tu, regs, ucb); #endif uprobe_buffer_put(ucb); @@ -1529,6 +1531,7 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, struct trace_uprobe *tu; struct uprobe_dispatch_data udd; struct uprobe_cpu_buffer *ucb; + unsigned int flags;
tu = container_of(con, struct trace_uprobe, consumer);
@@ -1541,11 +1544,13 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, return 0;
ucb = prepare_uprobe_buffer(tu, regs); - if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) + + flags = trace_probe_load_flag(&tu->tp); + if (flags & TP_FLAG_TRACE) uretprobe_trace_func(tu, func, regs, ucb);
#ifdef CONFIG_PERF_EVENTS - if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) + if (flags & TP_FLAG_PROFILE) uretprobe_perf_func(tu, func, regs, ucb); #endif uprobe_buffer_put(ucb); diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index c852f067ab06..4d787e8e3606 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -25,6 +25,10 @@ obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o libcurve25519-generic-y := curve25519-fiat32.o libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o libcurve25519-generic-y += curve25519-generic.o +# clang versions prior to 18 may blow out the stack with KASAN +ifeq ($(call clang-min-version, 180000),) +KASAN_SANITIZE_curve25519-hacl64.o := n +endif
obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o libcurve25519-y += curve25519.o diff --git a/lib/genalloc.c b/lib/genalloc.c index 00fc50d0a640..1c1317d4845e 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -899,8 +899,11 @@ struct gen_pool *of_gen_pool_get(struct device_node *np, if (!name) name = np_pool->name; } - if (pdev) + if (pdev) { pool = gen_pool_get(&pdev->dev, name); + put_device(&pdev->dev); + } + of_node_put(np_pool);
return pool; diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c index 88637038b30c..62b4e8b3c733 100644 --- a/lib/trace_readwrite.c +++ b/lib/trace_readwrite.c @@ -14,33 +14,33 @@
#ifdef CONFIG_TRACE_MMIO_ACCESS void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr) + unsigned long caller_addr, unsigned long caller_addr0) { - trace_rwmmio_write(caller_addr, val, width, addr); + trace_rwmmio_write(caller_addr, caller_addr0, val, width, addr); } EXPORT_SYMBOL_GPL(log_write_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_write);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr) + unsigned long caller_addr, unsigned long caller_addr0) { - trace_rwmmio_post_write(caller_addr, val, width, addr); + trace_rwmmio_post_write(caller_addr, caller_addr0, val, width, addr); } EXPORT_SYMBOL_GPL(log_post_write_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_write);
void log_read_mmio(u8 width, const volatile void __iomem *addr, - unsigned long caller_addr) + unsigned long caller_addr, unsigned long caller_addr0) { - trace_rwmmio_read(caller_addr, width, addr); + trace_rwmmio_read(caller_addr, caller_addr0, width, addr); } EXPORT_SYMBOL_GPL(log_read_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_read);
void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, - unsigned long caller_addr) + unsigned long caller_addr, unsigned long caller_addr0) { - trace_rwmmio_post_read(caller_addr, val, width, addr); + trace_rwmmio_post_read(caller_addr, caller_addr0, val, width, addr); } EXPORT_SYMBOL_GPL(log_post_read_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_read); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 5c25bde7b38d..77c1ac7a0591 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3324,6 +3324,9 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) return; }
+ if (!h->max_huge_pages) + return; + /* do node specific alloc */ for_each_online_node(i) { if (h->max_huge_pages_node[i] > 0) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5a2bae590ed7..86066a2cf258 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4887,7 +4887,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) if (!(gfp_mask & __GFP_NOMEMALLOC)) { alloc_flags |= ALLOC_NON_BLOCK;
- if (order > 0) + if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE)) alloc_flags |= ALLOC_HIGHATOMIC; }
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 23a82567fe62..54b0f24eb08f 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -1455,7 +1455,7 @@ void br_vlan_fill_forward_path_pvid(struct net_bridge *br, if (!br_opt_get(br, BROPT_VLAN_ENABLED)) return;
- vg = br_vlan_group(br); + vg = br_vlan_group_rcu(br);
if (idx >= 0 && ctx->vlan[idx].proto == br->vlan_proto) { diff --git a/net/core/filter.c b/net/core/filter.c index 183ede9345e6..9b4254feefcc 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2266,6 +2266,7 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) goto out_drop;
+ skb_dst_drop(skb); skb_dst_set(skb, dst); } else if (nh->nh_family != AF_INET6) { goto out_drop; @@ -2375,6 +2376,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, goto out_drop; }
+ skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); }
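Both hunks above release the current dst reference before installing a new one, because skb_dst_set() only stores the pointer and would otherwise leak whatever the skb referenced before. A self-contained sketch of that reference-replacement rule; the struct and helper names are made up for illustration and are not the skb/dst API.

/* refswap.c */
#include <stdio.h>
#include <stdlib.h>

struct ref {
	int refcount;
};

struct holder {
	struct ref *cur;
};

static struct ref *ref_get(struct ref *r) { r->refcount++; return r; }

static void ref_put(struct ref *r)
{
	if (r && --r->refcount == 0) {
		printf("freeing %p\n", (void *)r);
		free(r);
	}
}

/* mirrors skb_dst_set(): stores the pointer, does NOT release the old one */
static void holder_set(struct holder *h, struct ref *r) { h->cur = r; }

/* mirrors skb_dst_drop(): release the current reference, if any */
static void holder_drop(struct holder *h)
{
	ref_put(h->cur);
	h->cur = NULL;
}

int main(void)
{
	struct holder h = { 0 };
	struct ref *old = calloc(1, sizeof(*old));
	struct ref *new = calloc(1, sizeof(*new));

	holder_set(&h, ref_get(old));

	/* without this drop, 'old' would keep its reference forever --
	 * the leak the two hunks above fix */
	holder_drop(&h);
	holder_set(&h, ref_get(new));

	holder_drop(&h);
	return 0;
}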
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5ee1e1c2082c..1820e297e8ea 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -7125,7 +7125,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, &foc, TCP_SYNACK_FASTOPEN, skb); /* Add the child socket directly into the accept queue */ if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) { - reqsk_fastopen_remove(fastopen_sk, req, false); bh_unlock_sock(fastopen_sk); sock_put(fastopen_sk); goto drop_and_free; diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 6392973b1fa7..f1a8ae7a5af4 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -225,9 +225,12 @@ void mptcp_pm_add_addr_received(const struct sock *ssk, } else { __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); } - /* id0 should not have a different address */ + /* - id0 should not have a different address + * - special case for C-flag: linked to fill_local_addresses_vec() + */ } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) || - (addr->id > 0 && !READ_ONCE(pm->accept_addr))) { + (addr->id > 0 && !READ_ONCE(pm->accept_addr) && + !mptcp_pm_add_addr_c_flag_case(msk))) { mptcp_pm_announce_addr(msk, addr, true); mptcp_pm_add_addr_send_ack(msk); } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 7e72862a6b54..614ddae9c0a5 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -671,6 +671,7 @@ static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk) * and return the array size. */ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, + struct mptcp_addr_info *remote, struct mptcp_addr_info *addrs) { struct sock *sk = (struct sock *)msk; @@ -678,10 +679,12 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info mpc_addr; struct pm_nl_pernet *pernet; unsigned int subflows_max; + bool c_flag_case; int i = 0;
pernet = pm_nl_get_pernet_from_msk(msk); subflows_max = mptcp_pm_get_subflows_max(msk); + c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk);
mptcp_local_address((struct sock_common *)msk, &mpc_addr);
@@ -701,11 +704,26 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, }
if (msk->pm.subflows < subflows_max) { + bool is_id0; + msk->pm.subflows++; addrs[i] = entry->addr;
+ is_id0 = mptcp_addresses_equal(&entry->addr, + &mpc_addr, + entry->addr.port); + + if (c_flag_case && + (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) { + __clear_bit(addrs[i].id, + msk->pm.id_avail_bitmap); + + if (!is_id0) + msk->pm.local_addr_used++; + } + /* Special case for ID0: set the correct ID */ - if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port)) + if (is_id0) addrs[i].id = 0;
i++; @@ -713,6 +731,39 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, } rcu_read_unlock();
+ /* Special case: peer sets the C flag, accept one ADD_ADDR if default + * limits are used -- accepting no ADD_ADDR -- and use subflow endpoints + */ + if (!i && c_flag_case) { + unsigned int local_addr_max = mptcp_pm_get_local_addr_max(msk); + + while (msk->pm.local_addr_used < local_addr_max && + msk->pm.subflows < subflows_max) { + struct mptcp_pm_addr_entry local; + + if (!select_local_address(pernet, msk, &local)) + break; + + __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); + + if (!mptcp_pm_addr_families_match(sk, &local.addr, + remote)) + continue; + + if (mptcp_addresses_equal(&local.addr, &mpc_addr, + local.addr.port)) + continue; + + addrs[i] = local.addr; + + msk->pm.local_addr_used++; + msk->pm.subflows++; + i++; + } + + return i; + } + /* If the array is empty, fill in the single * 'IPADDRANY' local address */ @@ -760,7 +811,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) /* connect to the specified remote address, using whatever * local address the routing configuration will pick. */ - nr = fill_local_addresses_vec(msk, addrs); + nr = fill_local_addresses_vec(msk, &remote, addrs);
spin_unlock_bh(&msk->pm.lock); for (i = 0; i < nr; i++) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index e1637443203e..81507419c465 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -958,6 +958,14 @@ static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk) spin_unlock_bh(&msk->pm.lock); }
+static inline bool mptcp_pm_add_addr_c_flag_case(struct mptcp_sock *msk) +{ + return READ_ONCE(msk->pm.remote_deny_join_id0) && + msk->pm.local_addr_used == 0 && + mptcp_pm_get_add_addr_accept_max(msk) == 0 && + msk->pm.subflows < mptcp_pm_get_subflows_max(msk); +} + void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk); void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index c7503fd64915..088764ba47a1 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -31,6 +31,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/hash.h> +#include <crypto/algapi.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/ip.h> @@ -1796,7 +1797,7 @@ struct sctp_association *sctp_unpack_cookie( } }
- if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { + if (crypto_memneq(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { *error = -SCTP_IERROR_BAD_SIG; goto fail; } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 4848d5d50a5f..beae92ad25bb 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <crypto/algapi.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/ip.h> @@ -885,7 +886,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net, return SCTP_DISPOSITION_CONSUME;
nomem_authev: - sctp_ulpevent_free(ai_ev); + if (ai_ev) + sctp_ulpevent_free(ai_ev); nomem_aiev: sctp_ulpevent_free(ev); nomem_ev: @@ -4417,7 +4419,7 @@ static enum sctp_ierror sctp_sf_authenticate( sh_key, GFP_ATOMIC);
/* Discard the packet if the digests do not match */ - if (memcmp(save_digest, digest, sig_len)) { + if (crypto_memneq(save_digest, digest, sig_len)) { kfree(save_digest); return SCTP_IERROR_BAD_SIG; } diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c index aa108bea6739..4863ee08b7b1 100644 --- a/security/keys/trusted-keys/trusted_tpm1.c +++ b/security/keys/trusted-keys/trusted_tpm1.c @@ -7,6 +7,7 @@ */
#include <crypto/hash_info.h> +#include <crypto/algapi.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/parser.h> @@ -241,7 +242,7 @@ int TSS_checkhmac1(unsigned char *buffer, if (ret < 0) goto out;
- if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) + if (crypto_memneq(testhmac, authdata, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kfree_sensitive(sdesc); @@ -334,7 +335,7 @@ static int TSS_checkhmac2(unsigned char *buffer, TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0); if (ret < 0) goto out; - if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) { + if (crypto_memneq(testhmac1, authdata1, SHA1_DIGEST_SIZE)) { ret = -EINVAL; goto out; } @@ -343,7 +344,7 @@ static int TSS_checkhmac2(unsigned char *buffer, TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0); if (ret < 0) goto out; - if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) + if (crypto_memneq(testhmac2, authdata2, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kfree_sensitive(sdesc); diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index 04c50f9acda1..90fb774e5986 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -5862,6 +5862,13 @@ static const struct snd_soc_component_driver wcd934x_component_drv = { .endianness = 1, };
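The memcmp() -> crypto_memneq() conversions above (SCTP cookie and chunk authentication, and the TPM HMAC checks) swap an early-exit comparison for one whose run time does not depend on where the first mismatching byte sits, closing a timing side channel on secret digests. A user-space sketch of the property; the kernel's crypto_memneq() implementation differs in detail.

/* ct_compare.c */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* early-exit memcmp() leaks timing: it stops at the first difference */
static int leaky_neq(const void *a, const void *b, size_t n)
{
	return memcmp(a, b, n) != 0;
}

/* constant-time inequality: every byte is touched, only the accumulated
 * OR of the XORed bytes decides the result */
static unsigned long ct_neq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= pa[i] ^ pb[i];

	return diff;		/* 0 iff the buffers are equal */
}

int main(void)
{
	unsigned char sig[20]  = "aaaaaaaaaaaaaaaaaaa";
	unsigned char good[20] = "aaaaaaaaaaaaaaaaaaa";
	unsigned char bad[20]  = "baaaaaaaaaaaaaaaaaa";

	printf("good: leaky=%d ct=%lu\n", leaky_neq(sig, good, 20), ct_neq(sig, good, 20));
	printf("bad:  leaky=%d ct=%lu\n", leaky_neq(sig, bad, 20), ct_neq(sig, bad, 20));
	return 0;
}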
+static void wcd934x_put_device_action(void *data) +{ + struct device *dev = data; + + put_device(dev); +} + static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd) { struct device *dev = &wcd->sdev->dev; @@ -5882,11 +5889,12 @@ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd) }
slim_get_logical_addr(wcd->sidev); - wcd->if_regmap = regmap_init_slimbus(wcd->sidev, + wcd->if_regmap = devm_regmap_init_slimbus(wcd->sidev, &wcd934x_ifc_regmap_config); if (IS_ERR(wcd->if_regmap)) { - dev_err(dev, "Failed to allocate ifc register map\n"); - return PTR_ERR(wcd->if_regmap); + put_device(&wcd->sidev->dev); + return dev_err_probe(dev, PTR_ERR(wcd->if_regmap), + "Failed to allocate ifc register map\n"); }
of_property_read_u32(dev->parent->of_node, "qcom,dmic-sample-rate", @@ -5931,6 +5939,10 @@ static int wcd934x_codec_probe(struct platform_device *pdev) return ret; }
+ ret = devm_add_action_or_reset(dev, wcd934x_put_device_action, &wcd->sidev->dev); + if (ret) + return ret; + /* set default rate 9P6MHz */ regmap_update_bits(wcd->regmap, WCD934X_CODEC_RPM_CLK_MCLK_CFG, WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK, @@ -5939,19 +5951,15 @@ static int wcd934x_codec_probe(struct platform_device *pdev) memcpy(wcd->tx_chs, wcd934x_tx_chs, sizeof(wcd934x_tx_chs));
irq = regmap_irq_get_virq(data->irq_data, WCD934X_IRQ_SLIMBUS); - if (irq < 0) { - dev_err(wcd->dev, "Failed to get SLIM IRQ\n"); - return irq; - } + if (irq < 0) + return dev_err_probe(wcd->dev, irq, "Failed to get SLIM IRQ\n");
ret = devm_request_threaded_irq(dev, irq, NULL, wcd934x_slim_irq_handler, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "slim", wcd); - if (ret) { - dev_err(dev, "Failed to request slimbus irq\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to request slimbus irq\n");
wcd934x_register_mclk_output(wcd); platform_set_drvdata(pdev, wcd); diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 690fe97be190..1b7886cbbb65 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -282,10 +282,10 @@ $(OUTPUT)test-libbabeltrace.bin: $(BUILD) # -lbabeltrace provided by $(FEATURE_CHECK_LDFLAGS-libbabeltrace)
$(OUTPUT)test-compile-32.bin: - $(CC) -m32 -o $@ test-compile.c + $(CC) -m32 -Wall -Werror -o $@ test-compile.c
$(OUTPUT)test-compile-x32.bin: - $(CC) -mx32 -o $@ test-compile.c + $(CC) -mx32 -Wall -Werror -o $@ test-compile.c
$(OUTPUT)test-zlib.bin: $(BUILD) -lz diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h index ad47d7b31046..91db42a9f658 100644 --- a/tools/lib/perf/include/perf/event.h +++ b/tools/lib/perf/include/perf/event.h @@ -273,6 +273,7 @@ struct perf_record_header_event_type { struct perf_record_header_tracing_data { struct perf_event_header header; __u32 size; + __u32 pad; };
#define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15) diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c index 7aa946aa886d..c410ce87323b 100644 --- a/tools/perf/tests/perf-record.c +++ b/tools/perf/tests/perf-record.c @@ -113,6 +113,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest if (err < 0) { pr_debug("sched__get_first_possible_cpu: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); + evlist__cancel_workload(evlist); goto out_delete_evlist; }
@@ -124,6 +125,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { pr_debug("sched_setaffinity: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); + evlist__cancel_workload(evlist); goto out_delete_evlist; }
@@ -135,6 +137,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest if (err < 0) { pr_debug("perf_evlist__open: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); + evlist__cancel_workload(evlist); goto out_delete_evlist; }
@@ -147,6 +150,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest if (err < 0) { pr_debug("evlist__mmap: %s\n", str_error_r(errno, sbuf, sizeof(sbuf))); + evlist__cancel_workload(evlist); goto out_delete_evlist; }
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c index 091987dd3966..709d3f6b58c6 100644 --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c @@ -186,11 +186,27 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder) decoder->record.context_id = payload; break; case ARM_SPE_OP_TYPE: - if (idx == SPE_OP_PKT_HDR_CLASS_LD_ST_ATOMIC) { - if (payload & 0x1) - decoder->record.op = ARM_SPE_ST; + switch (idx) { + case SPE_OP_PKT_HDR_CLASS_LD_ST_ATOMIC: + decoder->record.op |= ARM_SPE_OP_LDST; + if (payload & SPE_OP_PKT_ST) + decoder->record.op |= ARM_SPE_OP_ST; else - decoder->record.op = ARM_SPE_LD; + decoder->record.op |= ARM_SPE_OP_LD; + if (SPE_OP_PKT_IS_LDST_SVE(payload)) + decoder->record.op |= ARM_SPE_OP_SVE_LDST; + break; + case SPE_OP_PKT_HDR_CLASS_OTHER: + decoder->record.op |= ARM_SPE_OP_OTHER; + if (SPE_OP_PKT_IS_OTHER_SVE_OP(payload)) + decoder->record.op |= ARM_SPE_OP_SVE_OTHER; + break; + case SPE_OP_PKT_HDR_CLASS_BR_ERET: + decoder->record.op |= ARM_SPE_OP_BRANCH_ERET; + break; + default: + pr_err("Get packet error!\n"); + return -1; } break; case ARM_SPE_EVENTS: @@ -218,6 +234,12 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder) if (payload & BIT(EV_MISPRED)) decoder->record.type |= ARM_SPE_BRANCH_MISS;
+ if (payload & BIT(EV_PARTIAL_PREDICATE)) + decoder->record.type |= ARM_SPE_SVE_PARTIAL_PRED; + + if (payload & BIT(EV_EMPTY_PREDICATE)) + decoder->record.type |= ARM_SPE_SVE_EMPTY_PRED; + break; case ARM_SPE_DATA_SOURCE: decoder->record.source = payload; diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h index 46a61df1145b..358c611eeddb 100644 --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h @@ -14,30 +14,57 @@ #include "arm-spe-pkt-decoder.h"
enum arm_spe_sample_type { - ARM_SPE_L1D_ACCESS = 1 << 0, - ARM_SPE_L1D_MISS = 1 << 1, - ARM_SPE_LLC_ACCESS = 1 << 2, - ARM_SPE_LLC_MISS = 1 << 3, - ARM_SPE_TLB_ACCESS = 1 << 4, - ARM_SPE_TLB_MISS = 1 << 5, - ARM_SPE_BRANCH_MISS = 1 << 6, - ARM_SPE_REMOTE_ACCESS = 1 << 7, + ARM_SPE_L1D_ACCESS = 1 << 0, + ARM_SPE_L1D_MISS = 1 << 1, + ARM_SPE_LLC_ACCESS = 1 << 2, + ARM_SPE_LLC_MISS = 1 << 3, + ARM_SPE_TLB_ACCESS = 1 << 4, + ARM_SPE_TLB_MISS = 1 << 5, + ARM_SPE_BRANCH_MISS = 1 << 6, + ARM_SPE_REMOTE_ACCESS = 1 << 7, + ARM_SPE_SVE_PARTIAL_PRED = 1 << 8, + ARM_SPE_SVE_EMPTY_PRED = 1 << 9, };
enum arm_spe_op_type { - ARM_SPE_LD = 1 << 0, - ARM_SPE_ST = 1 << 1, + /* First level operation type */ + ARM_SPE_OP_OTHER = 1 << 0, + ARM_SPE_OP_LDST = 1 << 1, + ARM_SPE_OP_BRANCH_ERET = 1 << 2, + + /* Second level operation type for OTHER */ + ARM_SPE_OP_SVE_OTHER = 1 << 16, + ARM_SPE_OP_SVE_FP = 1 << 17, + ARM_SPE_OP_SVE_PRED_OTHER = 1 << 18, + + /* Second level operation type for LDST */ + ARM_SPE_OP_LD = 1 << 16, + ARM_SPE_OP_ST = 1 << 17, + ARM_SPE_OP_ATOMIC = 1 << 18, + ARM_SPE_OP_EXCL = 1 << 19, + ARM_SPE_OP_AR = 1 << 20, + ARM_SPE_OP_SIMD_FP = 1 << 21, + ARM_SPE_OP_GP_REG = 1 << 22, + ARM_SPE_OP_UNSPEC_REG = 1 << 23, + ARM_SPE_OP_NV_SYSREG = 1 << 24, + ARM_SPE_OP_SVE_LDST = 1 << 25, + ARM_SPE_OP_SVE_PRED_LDST = 1 << 26, + ARM_SPE_OP_SVE_SG = 1 << 27, + + /* Second level operation type for BRANCH_ERET */ + ARM_SPE_OP_BR_COND = 1 << 16, + ARM_SPE_OP_BR_INDIRECT = 1 << 17, };
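The reworked enum above encodes operations on two levels: a class bit (other / load-store / branch) in the low 16 bits and class-specific detail bits from bit 16 upward, so a single record can carry both "store" and "SVE access". A short sketch of how such flags are combined and tested, using a trimmed copy of the values rather than the full perf enum:

/* spe_op_flags.c */
#include <stdio.h>

enum op_type {
	OP_OTHER       = 1 << 0,	/* first level: class */
	OP_LDST        = 1 << 1,
	OP_BRANCH_ERET = 1 << 2,

	OP_LD          = 1 << 16,	/* second level, valid with OP_LDST */
	OP_ST          = 1 << 17,
	OP_SVE_LDST    = 1 << 25,
};

int main(void)
{
	/* e.g. an SVE store record as the decoder above would build it */
	unsigned int op = OP_LDST | OP_ST | OP_SVE_LDST;

	if (op & OP_LDST)
		printf("%s%s\n",
		       (op & OP_ST) ? "store" : "load",
		       (op & OP_SVE_LDST) ? " (SVE)" : "");

	/* equality tests like 'op == OP_LD' no longer work once several
	 * bits are set, which is why the perf change switched to '&' */
	return 0;
}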
-enum arm_spe_neoverse_data_source { - ARM_SPE_NV_L1D = 0x0, - ARM_SPE_NV_L2 = 0x8, - ARM_SPE_NV_PEER_CORE = 0x9, - ARM_SPE_NV_LOCAL_CLUSTER = 0xa, - ARM_SPE_NV_SYS_CACHE = 0xb, - ARM_SPE_NV_PEER_CLUSTER = 0xc, - ARM_SPE_NV_REMOTE = 0xd, - ARM_SPE_NV_DRAM = 0xe, +enum arm_spe_common_data_source { + ARM_SPE_COMMON_DS_L1D = 0x0, + ARM_SPE_COMMON_DS_L2 = 0x8, + ARM_SPE_COMMON_DS_PEER_CORE = 0x9, + ARM_SPE_COMMON_DS_LOCAL_CLUSTER = 0xa, + ARM_SPE_COMMON_DS_SYS_CACHE = 0xb, + ARM_SPE_COMMON_DS_PEER_CLUSTER = 0xc, + ARM_SPE_COMMON_DS_REMOTE = 0xd, + ARM_SPE_COMMON_DS_DRAM = 0xe, };
struct arm_spe_record { diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 906476a839e1..383eda614f5c 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -389,15 +389,15 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq, return arm_spe_deliver_synth_event(spe, speq, event, &sample); }
-static const struct midr_range neoverse_spe[] = { +static const struct midr_range common_ds_encoding_cpus[] = { MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), {}, };
-static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record, - union perf_mem_data_src *data_src) +static void arm_spe__synth_data_source_common(const struct arm_spe_record *record, + union perf_mem_data_src *data_src) { /* * Even though four levels of cache hierarchy are possible, no known @@ -411,7 +411,7 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec * We have no data on the hit level or data source for stores in the * Neoverse SPE records. */ - if (record->op & ARM_SPE_ST) { + if (record->op & ARM_SPE_OP_ST) { data_src->mem_lvl = PERF_MEM_LVL_NA; data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA; data_src->mem_snoop = PERF_MEM_SNOOP_NA; @@ -419,17 +419,17 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec }
switch (record->source) { - case ARM_SPE_NV_L1D: + case ARM_SPE_COMMON_DS_L1D: data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1; data_src->mem_snoop = PERF_MEM_SNOOP_NONE; break; - case ARM_SPE_NV_L2: + case ARM_SPE_COMMON_DS_L2: data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2; data_src->mem_snoop = PERF_MEM_SNOOP_NONE; break; - case ARM_SPE_NV_PEER_CORE: + case ARM_SPE_COMMON_DS_PEER_CORE: data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2; data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER; @@ -438,8 +438,8 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec * We don't know if this is L1, L2 but we do know it was a cache-2-cache * transfer, so set SNOOPX_PEER */ - case ARM_SPE_NV_LOCAL_CLUSTER: - case ARM_SPE_NV_PEER_CLUSTER: + case ARM_SPE_COMMON_DS_LOCAL_CLUSTER: + case ARM_SPE_COMMON_DS_PEER_CLUSTER: data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3; data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER; @@ -447,7 +447,7 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec /* * System cache is assumed to be L3 */ - case ARM_SPE_NV_SYS_CACHE: + case ARM_SPE_COMMON_DS_SYS_CACHE: data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3; data_src->mem_snoop = PERF_MEM_SNOOP_HIT; @@ -456,13 +456,13 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec * We don't know what level it hit in, except it came from the other * socket */ - case ARM_SPE_NV_REMOTE: - data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1; - data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE; + case ARM_SPE_COMMON_DS_REMOTE: + data_src->mem_lvl = PERF_MEM_LVL_NA; + data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA; data_src->mem_remote = PERF_MEM_REMOTE_REMOTE; data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER; break; - case ARM_SPE_NV_DRAM: + case ARM_SPE_COMMON_DS_DRAM: data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT; data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM; data_src->mem_snoop = PERF_MEM_SNOOP_NONE; @@ -492,23 +492,23 @@ static void arm_spe__synth_data_source_generic(const struct arm_spe_record *reco }
if (record->type & ARM_SPE_REMOTE_ACCESS) - data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1; + data_src->mem_remote = PERF_MEM_REMOTE_REMOTE; }
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr) { - union perf_mem_data_src data_src = { 0 }; - bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe); + union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA }; + bool is_common = is_midr_in_range_list(midr, common_ds_encoding_cpus);
- if (record->op == ARM_SPE_LD) + if (record->op & ARM_SPE_OP_LD) data_src.mem_op = PERF_MEM_OP_LOAD; - else if (record->op == ARM_SPE_ST) + else if (record->op & ARM_SPE_OP_ST) data_src.mem_op = PERF_MEM_OP_STORE; else return 0;
- if (is_neoverse) - arm_spe__synth_data_source_neoverse(record, &data_src); + if (is_common) + arm_spe__synth_data_source_common(record, &data_src); else arm_spe__synth_data_source_generic(record, &data_src);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 22969cc00a5f..755da242f267 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -3146,6 +3146,8 @@ bool evsel__is_hybrid(struct evsel *evsel)
struct evsel *evsel__leader(struct evsel *evsel) { + if (evsel->core.leader == NULL) + return NULL; return container_of(evsel->core.leader, struct evsel, core); }
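The new NULL check in evsel__leader() is needed because container_of() applied to a NULL member pointer does not return NULL: it subtracts the member offset and yields a bogus non-NULL pointer. A minimal illustration with a local container_of copy; the struct names are made up.

/* container_of_null.c */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core { int id; };

struct evsel_like {
	int pad;
	struct core core;	/* non-zero offset inside the container */
};

int main(void)
{
	struct core *leader = NULL;
	/* formally undefined, but shows why the result can never be
	 * caught by a later NULL test on the container pointer */
	struct evsel_like *bogus = container_of(leader, struct evsel_like, core);

	printf("container_of(NULL, ...) = %p\n", (void *)bogus);
	return 0;
}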
diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c index 51424cdc3b68..aa9a0ebc1f93 100644 --- a/tools/perf/util/lzma.c +++ b/tools/perf/util/lzma.c @@ -115,7 +115,7 @@ bool lzma_is_compressed(const char *input) ssize_t rc;
if (fd < 0) - return -1; + return false;
rc = read(fd, buf, sizeof(buf)); close(fd); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index c8f7634f9846..1c4d124b1053 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1658,7 +1658,7 @@ static s64 perf_session__process_user_event(struct perf_session *session, struct perf_tool *tool = session->tool; struct perf_sample sample = { .time = 0, }; int fd = perf_data__fd(session->data); - int err; + s64 err;
if (event->header.type != PERF_RECORD_COMPRESSED || tool->compressed == perf_session__process_compressed_event_stub) diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c index 78d2297c1b67..1f7c06523059 100644 --- a/tools/perf/util/zlib.c +++ b/tools/perf/util/zlib.c @@ -88,7 +88,7 @@ bool gzip_is_compressed(const char *input) ssize_t rc;
if (fd < 0) - return -1; + return false;
rc = read(fd, buf, sizeof(buf)); close(fd); diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index ed7c0193ffc3..f8589e7c9383 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -3024,6 +3024,17 @@ deny_join_id0_tests() run_tests $ns1 $ns2 10.0.1.1 chk_join_nr 1 1 1 fi + + # default limits, server deny join id 0 + signal + if reset_with_allow_join_id0 "default limits, server deny join id 0" 0 1; then + pm_nl_set_limits $ns1 0 2 + pm_nl_set_limits $ns2 0 2 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal + pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow + pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow + run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 2 2 2 + fi }
fullmesh_tests() diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index e20191fb40d4..036b03aaedc3 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -38,9 +38,9 @@ * Define weak versions to play nice with binaries that are statically linked * against a libc that doesn't support registering its own rseq. */ -__weak ptrdiff_t __rseq_offset; -__weak unsigned int __rseq_size; -__weak unsigned int __rseq_flags; +extern __weak ptrdiff_t __rseq_offset; +extern __weak unsigned int __rseq_size; +extern __weak unsigned int __rseq_flags;
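The selftest change above turns the __rseq_* declarations into weak references ("extern __weak"); an unresolved weak reference has address NULL, which is exactly what the new "!libc_rseq_size_p" test in rseq_init() catches before falling back to dlsym(). A small stand-alone demonstration; whether the symbol actually resolves depends on the libc the binary links against.

/* weakref.c */
#include <stdio.h>

extern __attribute__((weak)) unsigned int __rseq_size;

int main(void)
{
	const unsigned int *p = &__rseq_size;

	if (!p)
		printf("__rseq_size not provided by libc, fall back to dlsym()\n");
	else
		printf("__rseq_size = %u\n", *p);
	return 0;
}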
static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset; static const unsigned int *libc_rseq_size_p = &__rseq_size; @@ -124,7 +124,7 @@ void rseq_init(void) * libc not having registered a restartable sequence. Try to find the * symbols if that's the case. */ - if (!*libc_rseq_size_p) { + if (!libc_rseq_size_p || !*libc_rseq_size_p) { libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); diff --git a/tools/testing/selftests/vm/madv_populate.c b/tools/testing/selftests/vm/madv_populate.c index 715a42e8e2cd..f37bda59c18c 100644 --- a/tools/testing/selftests/vm/madv_populate.c +++ b/tools/testing/selftests/vm/madv_populate.c @@ -274,12 +274,16 @@ static void test_softdirty(void)
int main(int argc, char **argv) { + int nr_tests = 16; int err;
pagesize = getpagesize();
+ if (softdirty_supported()) + nr_tests += 5; + ksft_print_header(); - ksft_set_plan(21); + ksft_set_plan(nr_tests);
sense_support(); test_prot_read(); @@ -287,7 +291,8 @@ int main(int argc, char **argv) test_holes(); test_populate_read(); test_populate_write(); - test_softdirty(); + if (softdirty_supported()) + test_softdirty();
err = ksft_get_fail_cnt(); if (err) diff --git a/tools/testing/selftests/vm/soft-dirty.c b/tools/testing/selftests/vm/soft-dirty.c index 21d8830c5f24..615ab2d204ef 100644 --- a/tools/testing/selftests/vm/soft-dirty.c +++ b/tools/testing/selftests/vm/soft-dirty.c @@ -190,8 +190,11 @@ int main(int argc, char **argv) int pagesize;
ksft_print_header(); - ksft_set_plan(15);
+ if (!softdirty_supported()) + ksft_exit_skip("soft-dirty is not support\n"); + + ksft_set_plan(15); pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY); if (pagemap_fd < 0) ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH); diff --git a/tools/testing/selftests/vm/vm_util.c b/tools/testing/selftests/vm/vm_util.c index f11f8adda521..fc5743bc1283 100644 --- a/tools/testing/selftests/vm/vm_util.c +++ b/tools/testing/selftests/vm/vm_util.c @@ -72,6 +72,42 @@ uint64_t read_pmd_pagesize(void) return strtoul(buf, NULL, 10); }
+char *__get_smap_entry(void *addr, const char *pattern, char *buf, size_t len) +{ + int ret; + FILE *fp; + char *entry = NULL; + char addr_pattern[MAX_LINE_LENGTH]; + + ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-", + (unsigned long)addr); + if (ret >= MAX_LINE_LENGTH) + ksft_exit_fail_msg("%s: Pattern is too long\n", __func__); + + fp = fopen(SMAP_FILE_PATH, "r"); + if (!fp) + ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__, + SMAP_FILE_PATH); + + if (!check_for_pattern(fp, addr_pattern, buf, len)) + goto err_out; + + /* Fetch the pattern in the same block */ + if (!check_for_pattern(fp, pattern, buf, len)) + goto err_out; + + /* Trim trailing newline */ + entry = strchr(buf, '\n'); + if (entry) + *entry = '\0'; + + entry = buf + strlen(pattern); + +err_out: + fclose(fp); + return entry; +} + bool __check_huge(void *addr, char *pattern, int nr_hpages, uint64_t hpage_size) { @@ -124,3 +160,44 @@ bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size) { return __check_huge(addr, "ShmemPmdMapped:", nr_hpages, hpage_size); } + +static bool check_vmflag(void *addr, const char *flag) +{ + char buffer[MAX_LINE_LENGTH]; + const char *flags; + size_t flaglen; + + flags = __get_smap_entry(addr, "VmFlags:", buffer, sizeof(buffer)); + if (!flags) + ksft_exit_fail_msg("%s: No VmFlags for %p\n", __func__, addr); + + while (true) { + flags += strspn(flags, " "); + + flaglen = strcspn(flags, " "); + if (!flaglen) + return false; + + if (flaglen == strlen(flag) && !memcmp(flags, flag, flaglen)) + return true; + + flags += flaglen; + } +} + +bool softdirty_supported(void) +{ + char *addr; + bool supported = false; + const size_t pagesize = getpagesize(); + + /* New mappings are expected to be marked with VM_SOFTDIRTY (sd). */ + addr = mmap(0, pagesize, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + if (!addr) + ksft_exit_fail_msg("mmap failed\n"); + + supported = check_vmflag(addr, "sd"); + munmap(addr, pagesize); + return supported; +} diff --git a/tools/testing/selftests/vm/vm_util.h b/tools/testing/selftests/vm/vm_util.h index 5c35de454e08..470f85fe9594 100644 --- a/tools/testing/selftests/vm/vm_util.h +++ b/tools/testing/selftests/vm/vm_util.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <stdint.h> #include <stdbool.h> +#include <sys/mman.h>
uint64_t pagemap_get_entry(int fd, char *start); bool pagemap_is_softdirty(int fd, char *start); @@ -10,3 +11,4 @@ uint64_t read_pmd_pagesize(void); bool check_huge_anon(void *addr, int nr_hpages, uint64_t hpage_size); bool check_huge_file(void *addr, int nr_hpages, uint64_t hpage_size); bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size); +bool softdirty_supported(void);