I'm announcing the release of the 5.10.146 kernel.
All users of the 5.10 kernel series must upgrade.
The updated 5.10.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-5.10.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
thanks,
greg k-h
------------
 Makefile | 2
 arch/arm64/Kconfig | 5
 arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts | 5
 arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi | 9
 arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 1
 arch/mips/lantiq/clk.c | 1
 arch/mips/loongson32/common/platform.c | 16
 arch/riscv/kernel/signal.c | 2
 arch/x86/include/asm/kvm_host.h | 1
 arch/x86/kvm/svm/sev.c | 8
 arch/x86/kvm/svm/svm.c | 1
 arch/x86/kvm/svm/svm.h | 2
 arch/x86/kvm/x86.c | 6
 drivers/dax/hmem/device.c | 1
 drivers/dma/ti/k3-udma-private.c | 6
 drivers/firmware/efi/libstub/secureboot.c | 8
 drivers/firmware/efi/libstub/x86-stub.c | 7
 drivers/gpio/gpio-mockup.c | 2
 drivers/gpio/gpiolib-cdev.c | 5
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 23 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 15
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 34 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 5
 drivers/gpu/drm/amd/amdgpu/soc15.c | 25 -
 drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 3
 drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 4
 drivers/gpu/drm/gma500/gma_display.c | 11
 drivers/gpu/drm/hisilicon/hibmc/Kconfig | 3
 drivers/gpu/drm/mediatek/mtk_dsi.c | 24 -
 drivers/gpu/drm/panel/panel-simple.c | 2
 drivers/gpu/drm/rockchip/cdn-dp-core.c | 5
 drivers/hv/vmbus_drv.c | 10
 drivers/i2c/busses/i2c-imx.c | 2
 drivers/i2c/busses/i2c-mlxbf.c | 68 +---
 drivers/interconnect/qcom/icc-rpmh.c | 10
 drivers/interconnect/qcom/sm8150.c | 1
 drivers/interconnect/qcom/sm8250.c | 1
 drivers/iommu/intel/iommu.c | 2
 drivers/media/usb/b2c2/flexcop-usb.c | 2
 drivers/mmc/core/sd.c | 42 --
 drivers/net/bonding/bond_3ad.c | 5
 drivers/net/bonding/bond_main.c | 57 ++-
 drivers/net/can/flexcan.c | 10
 drivers/net/can/usb/gs_usb.c | 4
 drivers/net/ethernet/freescale/enetc/enetc.c | 32 -
 drivers/net/ethernet/freescale/enetc/enetc.h | 9
 drivers/net/ethernet/freescale/enetc/enetc_pf.c | 11
 drivers/net/ethernet/freescale/enetc/enetc_qos.c | 23 +
 drivers/net/ethernet/freescale/enetc/enetc_vf.c | 4
 drivers/net/ethernet/intel/i40e/i40e_main.c | 32 +
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 20 +
 drivers/net/ethernet/intel/iavf/iavf_txrx.c | 9
 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 7
 drivers/net/ethernet/sfc/efx_channels.c | 2
 drivers/net/ethernet/sfc/tx.c | 2
 drivers/net/ethernet/sun/sunhme.c | 4
 drivers/net/ipa/gsi.c | 16
 drivers/net/ipa/gsi_private.h | 2
 drivers/net/ipa/gsi_trans.c | 9
 drivers/net/ipa/ipa_cmd.c | 2
 drivers/net/ipa/ipa_data.h | 4
 drivers/net/ipa/ipa_qmi.c | 10
 drivers/net/ipa/ipa_qmi_msg.c | 8
 drivers/net/ipa/ipa_qmi_msg.h | 37 +-
 drivers/net/ipa/ipa_table.c | 88 ++---
 drivers/net/ipa/ipa_table.h | 6
 drivers/net/ipvlan/ipvlan_core.c | 6
 drivers/net/mdio/of_mdio.c | 1
 drivers/net/phy/aquantia_main.c | 53 +++
 drivers/net/team/team.c | 24 +
 drivers/net/wireguard/netlink.c | 13
 drivers/net/wireguard/selftest/ratelimiter.c | 25 -
 drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 2
 drivers/s390/block/dasd_alias.c | 9
 drivers/scsi/mpt3sas/mpt3sas_base.c | 161 +++++++--
 drivers/scsi/mpt3sas/mpt3sas_base.h | 1
 drivers/tty/serial/atmel_serial.c | 8
 drivers/tty/serial/serial-tegra.c | 5
 drivers/tty/serial/tegra-tcu.c | 2
 drivers/usb/cdns3/gadget.c | 4
 drivers/usb/core/hub.c | 2
 drivers/usb/dwc3/core.c | 4
 drivers/usb/dwc3/core.h | 4
 drivers/usb/dwc3/gadget.c | 81 ++---
 drivers/usb/host/xhci-mtk-sch.c | 149 +++++----
 drivers/usb/host/xhci-mtk.h | 2
 drivers/usb/serial/option.c | 6
 drivers/usb/typec/mux/intel_pmc_mux.c | 37 +-
 drivers/vfio/vfio_iommu_type1.c | 110 +++++-
 fs/cifs/cifsproto.h | 2
 fs/cifs/cifssmb.c | 6
 fs/cifs/connect.c | 22 +
 fs/cifs/transport.c | 6
 fs/ext4/extents.c | 4
 fs/ext4/ialloc.c | 2
 fs/ext4/mballoc.c | 4
 fs/xfs/libxfs/xfs_inode_buf.c | 35 +-
 fs/xfs/xfs_inode.c | 36 +-
 include/linux/inetdevice.h | 9
 include/linux/kvm_host.h | 2
 include/linux/netdevice.h | 8
 include/linux/serial_core.h | 17 +
 include/net/bond_3ad.h | 2
 include/net/bonding.h | 3
 kernel/workqueue.c | 6
 mm/slub.c | 5
 net/bridge/netfilter/ebtables.c | 4
 net/core/dev_ioctl.c | 44 --
 net/core/flow_dissector.c | 21 -
 net/ipv4/devinet.c | 4
 net/netfilter/nf_conntrack_irc.c | 34 +-
 net/netfilter/nf_conntrack_sip.c | 4
 net/netfilter/nf_tables_api.c | 8
 net/netfilter/nfnetlink_osf.c | 4
 net/sched/cls_api.c | 1
 net/sched/sch_taprio.c | 18 -
 net/smc/smc_core.c | 5
 sound/pci/hda/hda_intel.c | 2
 sound/pci/hda/patch_hdmi.c | 1
 sound/pci/hda/patch_realtek.c | 32 +
 tools/perf/util/genelf.c | 14
 tools/perf/util/genelf.h | 4
 tools/perf/util/symbol-elf.c | 7
 tools/testing/selftests/net/forwarding/sch_red.sh | 1
 virt/kvm/kvm_main.c | 16
 127 files changed, 1195 insertions(+), 703 deletions(-)
Adrian Hunter (2):
      mmc: core: Fix inconsistent sd3_bus_mode at UHS-I SD voltage switch failure
      perf kcore_copy: Do not check /proc/modules is unchanged
Al Viro (1): riscv: fix a nasty sigreturn bug...
Alan Stern (1): USB: core: Fix RST error in hub.c
Alex Deucher (2):
      drm/amdgpu: move nbio sdma_doorbell_range() into sdma code for vega
      drm/amdgpu: make sure to init common IP before gmc
Alex Elder (6):
      net: ipa: fix assumptions about DMA address size
      net: ipa: fix table alignment requirement
      net: ipa: avoid 64-bit modulus
      net: ipa: DMA addresses are nicely aligned
      net: ipa: kill IPA_TABLE_ENTRY_SIZE
      net: ipa: properly limit modem routing table use
Alex Williamson (1): vfio/type1: Unpin zero pages
AngeloGioacchino Del Regno (1): drm/mediatek: dsi: Add atomic {destroy,duplicate}_state, reset callbacks
Ard Biesheuvel (2):
      efi: x86: Wipe setup_data on pure EFI boot
      efi: libstub: check Shim mode using MokSBStateRT
Arnd Bergmann (1): net: socket: remove register_gifconf
Asmaa Mnebhi (3):
      i2c: mlxbf: incorrect base address passed during io write
      i2c: mlxbf: prevent stack overflow in mlxbf_i2c_smbus_start_transaction()
      i2c: mlxbf: Fix frequency calculation
Azhar Shaikh (1): usb: typec: intel_pmc_mux: Update IOM port status offset for AlderLake
Bartosz Golaszewski (1): gpio: mockup: fix NULL pointer dereference when removing debugfs
Benjamin Poirier (3):
      net: bonding: Share lacpdu_mcast_addr definition
      net: bonding: Unsync device addresses on ndo_stop
      net: team: Unsync device addresses on ndo_stop
Brett Creeley (1): iavf: Fix cached head and tail value for iavf_get_tx_pending
Brian Norris (1): arm64: dts: rockchip: Pull up wlan wake# on Gru-Bob
Callum Osmotherly (2):
      ALSA: hda/realtek: Enable 4-speaker output Dell Precision 5570 laptop
      ALSA: hda/realtek: Enable 4-speaker output Dell Precision 5530 laptop
Carl Yin(殷张成) (1): USB: serial: option: add Quectel BG95 0x0203 composition
Chao Yu (1): mm/slub: fix to return errno if kmalloc() fails
Christoph Hellwig (1): xfs: fix up non-directory creation in SGID directories
Chunfeng Yun (7):
      usb: xhci-mtk: get the microframe boundary for ESIT
      usb: xhci-mtk: add only one extra CS for FS/LS INTR
      usb: xhci-mtk: use @sch_tt to check whether need do TT schedule
      usb: xhci-mtk: add a function to (un)load bandwidth info
      usb: xhci-mtk: add some schedule error number
      usb: xhci-mtk: allow multiple Start-Split in a microframe
      usb: xhci-mtk: fix issue of out-of-bounds array access
Dan Williams (1): devdax: Fix soft-reservation memory description
Daniel Jordan (3):
      vfio/type1: Change success value of vaddr_get_pfn()
      vfio/type1: Prepare for batched pinning with struct vfio_batch
      vfio/type1: fix vaddr_get_pfns() return in vfio_pin_page_external()
Dave Chinner (2):
      xfs: reorder iunlink remove operation in xfs_ifree
      xfs: validate inode fork size against fork format
David Howells (1): cifs: use discard iterator to discard unneeded network data more efficiently
David Leadbeater (1): netfilter: nf_conntrack_irc: Tighten matching on DCC message
Fabio Estevam (1): arm64: dts: rockchip: Remove 'enable-active-low' from rk3399-puma
Felix Fietkau (1): wifi: mt76: fix reading current per-tid starting sequence number for aggregation
Florian Westphal (1): netfilter: ebtables: fix memory leak when blob is malformed
Greg Kroah-Hartman (3):
      Revert "usb: add quirks for Lenovo OneLink+ Dock"
      Revert "usb: gadget: udc-xilinx: replace memcpy with memcpy_toio"
      Linux 5.10.146
Hamza Mahfooz (1): drm/amdgpu: use dirty framebuffer helper
Hangbin Liu (1): selftests: forwarding: add shebang for sch_red.sh
Hangyu Hua (1): net: sched: fix possible refcount leak in tc_new_tfilter()
Hans de Goede (1): drm/gma500: Fix BUG: sleeping function called from invalid context errors
Heiko Schocher (1): drm/panel: simple: Fix innolux_g121i1_l01 bus_format
Igor Ryzhov (1): netfilter: nf_conntrack_sip: fix ct_sip_walk_headers
Ikjoon Jang (1): usb: xhci-mtk: relax TT periodic bandwidth allocation
Ilpo Järvinen (3):
      serial: Create uart_xmit_advance()
      serial: tegra: Use uart_xmit_advance(), fixes icount.tx accounting
      serial: tegra-tcu: Use uart_xmit_advance(), fixes icount.tx accounting
Ioana Ciornei (1): net: phy: aquantia: wait for the suspend/resume operations to finish
Jan Kara (1): ext4: make directory inode spreading reflect flexbg size
Jason A. Donenfeld (2):
      wireguard: ratelimiter: disable timings test by default
      wireguard: netlink: avoid variable-sized memcpy on sockaddr
Javier Martinez Canillas (1): drm/hisilicon/hibmc: Allow to be built if COMPILE_TEST is enabled
Jean-Francois Le Fillatre (1): usb: add quirks for Lenovo OneLink+ Dock
Jingwen Chen (1): drm/amd/amdgpu: fixing read wrong pf2vf data in SRIOV
Johan Hovold (1): media: flexcop-usb: fix endpoint type check
Kai Vehmanen (1): ALSA: hda: add Intel 5 Series / 3400 PCI DID
Liang He (2):
      dmaengine: ti: k3-udma-private: Fix refcount leak bug in of_xudma_dev_get()
      of: mdio: Add of_node_put() when breaking out of for_each_xx
Lieven Hey (1): perf jit: Include program header in ELF files
Lino Sanfilippo (1): serial: atmel: remove redundant assignment in rs485_config
Lu Wei (1): ipvlan: Fix out-of-bound bugs caused by unset skb->mac_header
Luben Tuikov (1): drm/amdgpu: Fix check for RAS support
Ludovic Cintrat (1): net: core: fix flow symmetric hash
Luke D. Jones (3):
      ALSA: hda/realtek: Add pincfg for ASUS G513 HP jack
      ALSA: hda/realtek: Add pincfg for ASUS G533Z HP jack
      ALSA: hda/realtek: Add quirk for ASUS GA503R laptop
Luís Henriques (1): ext4: fix bug in extents parsing when eh_entries == 0 and eh_depth > 0
Marc Kleine-Budde (2):
      can: flexcan: flexcan_mailbox_read() fix return value for drop = true
      can: gs_usb: gs_can_open(): fix race dev->can.state condition
Mark Brown (1): arm64/bti: Disable in kernel BTI when cross section thunks are broken
Meng Li (1): gpiolib: cdev: Set lineevent_state::irq after IRQ register successfully
Michal Jaron (3):
      iavf: Fix set max MTU size with port VLAN and jumbo frames
      i40e: Fix VF set max MTU size
      i40e: Fix set max_tx_rate when it is lower than 1 Mbps
Mike Tipton (1): interconnect: qcom: icc-rpmh: Add BCMs to commit list in pre_aggregate
Mingwei Zhang (1): KVM: SEV: add cache flush to solve SEV cache incoherency issues
Mohan Kumar (1): ALSA: hda/tegra: set depop delay for tegra
Nathan Chancellor (2):
      arm64: Restrict ARM64_BTI_KERNEL to clang 12.0.0 and newer
      drm/amd/display: Mark dml30's UseMinimumDCFCLK() as noinline for stack usage
Nathan Huckleberry (1): drm/rockchip: Fix return type of cdn_dp_connector_mode_valid
Norbert Zulinski (1): iavf: Fix bad page state
Nícolas F. R. A. Prado (1): drm/mediatek: dsi: Move mtk_dsi_stop() call back to mtk_dsi_poweroff()
Pablo Neira Ayuso (1): netfilter: nfnetlink_osf: fix possible bogus match in nf_osf_find()
Pawel Laszczak (2):
      usb: cdns3: fix incorrect handling TRB_SMM flag for ISOC transfer
      usb: cdns3: fix issue with rearming ISO OUT endpoint
Peng Ju Zhou (1): drm/amdgpu: indirect register access for nv12 sriov
Piyush Mehta (1): usb: gadget: udc-xilinx: replace memcpy with memcpy_toio
Randy Dunlap (2):
      MIPS: lantiq: export clk_get_io() for lantiq_wdt.ko
      drm/hisilicon: Add depends on MMU
Sean Anderson (1): net: sunhme: Fix packet reception for len < RX_COPY_THRESHOLD
Serge Semin (1): MIPS: Loongson32: Fix PHY-mode being left unspecified
Sergiu Moga (1): tty: serial: atmel: Preserve previous USART mode if RS485 disabled
Sreekanth Reddy (1): scsi: mpt3sas: Fix return value check of dma_get_required_mask()
Stefan Haberland (1): s390/dasd: fix Oops in dasd_alias_get_start_dev due to missing pavgroup
Stefan Metzmacher (1): cifs: always initialize struct msghdr smb_msg completely
Suganath Prabu S (1): scsi: mpt3sas: Force PCIe scatterlist allocations to be within same 4 GB region
Takashi Iwai (1): ALSA: hda/realtek: Re-arrange quirk table entries
Tetsuo Handa (3):
      netfilter: nf_tables: fix nft_counters_enabled underflow at nf_tables_addchain()
      netfilter: nf_tables: fix percpu memory leak at nf_tables_addchain()
      workqueue: don't skip lockdep work dependency in cancel_work_sync()
Theodore Ts'o (1): ext4: limit the number of retries after discarding preallocations blocks
Thinh Nguyen (3):
      usb: dwc3: gadget: Prevent repeat pullup()
      usb: dwc3: gadget: Refactor pullup()
      usb: dwc3: gadget: Don't modify GEVNTCOUNT in pullup()
Utkarsh Patel (1): usb: typec: intel_pmc_mux: Add new ACPI ID for Meteor Lake IOM device
Uwe Kleine-König (1): i2c: imx: If pm_runtime_get_sync() returned 1 device access is possible
Victor Skvortsov (1): drm/amdgpu: Separate vf2pf work item init from virt data exchange
Vitaly Kuznetsov (1): Drivers: hv: Never allocate anything besides framebuffer from framebuffer memory region
Vladimir Oltean (3):
      net: enetc: move enetc_set_psfp() out of the common enetc_set_features()
      net/sched: taprio: avoid disabling offload when it was never enabled
      net/sched: taprio: make qdisc_leaf() see the per-netdev-queue pfifo child qdiscs
Wen Gu (1): net/smc: Stop the CLC flow if no link to map buffers on
Wesley Cheng (3):
      usb: dwc3: gadget: Avoid starting DWC3 gadget during UDC unbind
      usb: dwc3: Issue core soft reset before enabling run/stop
      usb: dwc3: gadget: Avoid duplicate requests to enable Run/Stop
Yao Wang1 (1): drm/amd/display: Limit user regamma to a valid value
Yi Liu (1): iommu/vt-d: Check correct capability for sagaw determination
huangwenhui (1): ALSA: hda/realtek: Add quirk for Huawei WRT-WX9
jerry meng (1): USB: serial: option: add Quectel RM520N
zain wang (1): arm64: dts: rockchip: Set RK3399-Gru PCLK_EDP to 24 MHz
zhang kai (1): net: let flow have same hash in two directions
Íñigo Huguet (2):
      sfc: fix TX channel offset when using legacy interrupts
      sfc: fix null pointer dereference in efx_hard_start_xmit
diff --git a/Makefile b/Makefile
index 76c85e40beea..26a871eebe92 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 145
+SUBLEVEL = 146
 EXTRAVERSION =
 NAME = Dare mighty things
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1116a8d092c0..af65ab83e63d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1654,7 +1654,10 @@ config ARM64_BTI_KERNEL depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 depends on !CC_IS_GCC || GCC_VERSION >= 100100 - depends on !(CC_IS_CLANG && GCOV_KERNEL) + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671 + depends on !CC_IS_GCC + # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc7... + depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help Build the kernel with Branch Target Identification annotations diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index e6c1c94c8d69..07737b65d7a3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts @@ -87,3 +87,8 @@ h1_int_od_l: h1-int-od-l { }; }; }; + +&wlan_host_wake_l { + /* Kevin has an external pull up, but Bob does not. */ + rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi index 1384dabbdf40..739937f70f8d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi @@ -237,6 +237,14 @@ &cdn_dp { &edp { status = "okay";
+ /* + * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only + * set this here, because rk3399-gru.dtsi ensures we can generate this + * off GPLL=600MHz, whereas some other RK3399 boards may not. + */ + assigned-clocks = <&cru PCLK_EDP>; + assigned-clock-rates = <24000000>; + ports { edp_out: port@1 { reg = <1>; @@ -395,6 +403,7 @@ wifi_perst_l: wifi-perst-l { };
wlan_host_wake_l: wlan-host-wake-l { + /* Kevin has an external pull up, but Bob does not */ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 544110aaffc5..95bc7a5f61dd 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -102,7 +102,6 @@ vcc3v3_sys: vcc3v3-sys { vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; - enable-active-low; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_host_en>; regulator-name = "vcc5v0_host"; diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index 7a623684d9b5..2d5a0bcb0cec 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -50,6 +50,7 @@ struct clk *clk_get_io(void) { return &cpu_clk_generic[2]; } +EXPORT_SYMBOL_GPL(clk_get_io);
struct clk *clk_get_ppe(void) { diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c index 794c96c2a4cd..311dc1580bbd 100644 --- a/arch/mips/loongson32/common/platform.c +++ b/arch/mips/loongson32/common/platform.c @@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) if (plat_dat->bus_id) { __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 | GMAC1_USE_UART0, LS1X_MUX_CTRL0); - switch (plat_dat->interface) { + switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_RGMII: val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23); break; @@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) break; default: pr_err("unsupported mii mode %d\n", - plat_dat->interface); + plat_dat->phy_interface); return -ENOTSUPP; } val &= ~GMAC1_SHUT; } else { - switch (plat_dat->interface) { + switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_RGMII: val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01); break; @@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) break; default: pr_err("unsupported mii mode %d\n", - plat_dat->interface); + plat_dat->phy_interface); return -ENOTSUPP; } val &= ~GMAC0_SHUT; @@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) plat_dat = dev_get_platdata(&pdev->dev);
val &= ~PHY_INTF_SELI; - if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) + if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) val |= 0x4 << PHY_INTF_SELI_SHIFT; __raw_writel(val, LS1X_MUX_CTRL1);
@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = { .bus_id = 0, .phy_addr = -1, #if defined(CONFIG_LOONGSON1_LS1B) - .interface = PHY_INTERFACE_MODE_MII, + .phy_interface = PHY_INTERFACE_MODE_MII, #elif defined(CONFIG_LOONGSON1_LS1C) - .interface = PHY_INTERFACE_MODE_RMII, + .phy_interface = PHY_INTERFACE_MODE_RMII, #endif .mdio_bus_data = &ls1x_mdio_bus_data, .dma_cfg = &ls1x_eth_dma_cfg, @@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = { static struct plat_stmmacenet_data ls1x_eth1_pdata = { .bus_id = 1, .phy_addr = -1, - .interface = PHY_INTERFACE_MODE_MII, + .phy_interface = PHY_INTERFACE_MODE_MII, .mdio_bus_data = &ls1x_mdio_bus_data, .dma_cfg = &ls1x_eth_dma_cfg, .has_gmac = 1, diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index bc6841867b51..529c123cf0a4 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -121,6 +121,8 @@ SYSCALL_DEFINE0(rt_sigreturn) if (restore_altstack(&frame->uc.uc_stack)) goto badframe;
+ regs->cause = -1UL; + return regs->a0;
badframe: diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 38c63a78aba6..660012ab7bfa 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1275,6 +1275,7 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + void (*guest_memory_reclaimed)(struct kvm *kvm);
int (*get_msr_feature)(struct kvm_msr_entry *entry);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7397cc449e2f..c2b34998c27d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void) sev_flush_asids(); }
+void sev_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!sev_guest(kvm)) + return; + + wbinvd_on_all_cpus(); +} + void pre_sev_run(struct vcpu_svm *svm, int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, cpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 442705517caf..a0512a91760d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4325,6 +4325,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + .guest_memory_reclaimed = sev_guest_memory_reclaimed,
.can_emulate_instruction = svm_can_emulate_instruction,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 10aba1dd264e..f62d13fc6e01 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range); int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range); +void sev_guest_memory_reclaimed(struct kvm *kvm); + void pre_sev_run(struct vcpu_svm *svm, int cpu); int __init sev_hardware_setup(void); void sev_hardware_teardown(void); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c5a08ec348e6..f3473418dcd5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8875,6 +8875,12 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); }
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ + if (kvm_x86_ops.guest_memory_reclaimed) + kvm_x86_ops.guest_memory_reclaimed(kvm); +} + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { if (!lapic_in_kernel(vcpu)) diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c index cb6401c9e9a4..acf31cc1dbcc 100644 --- a/drivers/dax/hmem/device.c +++ b/drivers/dax/hmem/device.c @@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r) .start = r->start, .end = r->end, .flags = IORESOURCE_MEM, + .desc = IORES_DESC_SOFT_RESERVED, }; struct platform_device *pdev; struct memregion_info info; diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c index 8563a392f30b..dadab2feca08 100644 --- a/drivers/dma/ti/k3-udma-private.c +++ b/drivers/dma/ti/k3-udma-private.c @@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) }
pdev = of_find_device_by_node(udma_node); + if (np != udma_node) + of_node_put(udma_node); + if (!pdev) { pr_debug("UDMA device not found\n"); return ERR_PTR(-EPROBE_DEFER); }
- if (np != udma_node) - of_node_put(udma_node); - ud = platform_get_drvdata(pdev); if (!ud) { pr_debug("UDMA has not been probed\n"); diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 5efc524b14be..a2be3a71bcf8 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -19,7 +19,7 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
/* SHIM variables */ static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; -static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; +static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
/* * Determine whether we're in secure boot mode. @@ -53,8 +53,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/* * See if a user has put the shim into insecure mode. If so, and if the - * variable doesn't have the runtime attribute set, we might as well - * honor that. + * variable doesn't have the non-volatile attribute set, we might as + * well honor that. */ size = sizeof(moksbstate); status = get_efi_var(shim_MokSBState_name, &shim_guid, @@ -63,7 +63,7 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* If it fails, we don't care why. Default to secure */ if (status != EFI_SUCCESS) goto secure_boot_enabled; - if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1) + if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1) return efi_secureboot_mode_disabled;
secure_boot_enabled: diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 3672539cb96e..5d0f1b1966fc 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -414,6 +414,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, hdr->ramdisk_image = 0; hdr->ramdisk_size = 0;
+ /* + * Disregard any setup data that was provided by the bootloader: + * setup_data could be pointing anywhere, and we have no way of + * authenticating or validating the payload. + */ + hdr->setup_data = 0; + efi_stub_entry(handle, sys_table_arg, boot_params); /* not reached */
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 780cba4e30d0..876027fdefc9 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -604,9 +604,9 @@ static int __init gpio_mockup_init(void)
static void __exit gpio_mockup_exit(void) { + gpio_mockup_unregister_pdevs(); debugfs_remove_recursive(gpio_mockup_dbg_dir); platform_driver_unregister(&gpio_mockup_driver); - gpio_mockup_unregister_pdevs(); }
module_init(gpio_mockup_init); diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 2613881a66e6..381cfa26a4a1 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -1769,7 +1769,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) ret = -ENODEV; goto out_free_le; } - le->irq = irq;
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? @@ -1783,7 +1782,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) init_waitqueue_head(&le->wait);
/* Request a thread to read the events */ - ret = request_threaded_irq(le->irq, + ret = request_threaded_irq(irq, lineevent_irq_handler, lineevent_irq_thread, irqflags, @@ -1792,6 +1791,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_le;
+ le->irq = irq; + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); if (fd < 0) { ret = fd; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f262c4e7a48a..881045e600af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2047,6 +2047,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); return r; } + + /*get pf2vf msg info at it's earliest time*/ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + } }
@@ -2174,8 +2179,20 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) } adev->ip_blocks[i].status.sw = true;
- /* need to do gmc hw init early so we can allocate gpu mem */ - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { + /* need to do common hw init early so everything is set up for gmc */ + r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); + if (r) { + DRM_ERROR("hw_init %d failed %d\n", i, r); + goto init_failed; + } + adev->ip_blocks[i].status.hw = true; + } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { + /* need to do gmc hw init early so we can allocate gpu mem */ + /* Try to reserve bad pages early */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_exchange_data(adev); + r = amdgpu_device_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); @@ -2753,8 +2770,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) int i, r;
static enum amd_ip_block_type ip_order[] = { - AMD_IP_BLOCK_TYPE_GMC, AMD_IP_BLOCK_TYPE_COMMON, + AMD_IP_BLOCK_TYPE_GMC, AMD_IP_BLOCK_TYPE_PSP, AMD_IP_BLOCK_TYPE_IH, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 7cc7af2a6822..947f50e402ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -35,6 +35,7 @@ #include <linux/pci.h> #include <linux/pm_runtime.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_damage_helper.h> #include <drm/drm_edid.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_fb_helper.h> @@ -498,6 +499,7 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector, static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { .destroy = drm_gem_fb_destroy, .create_handle = drm_gem_fb_create_handle, + .dirty = drm_atomic_helper_dirtyfb, };
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index eb22a190c242..3638f0e12a2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1979,15 +1979,12 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, return 0; }
-static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev) +static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { - if (adev->asic_type != CHIP_VEGA10 && - adev->asic_type != CHIP_VEGA20 && - adev->asic_type != CHIP_ARCTURUS && - adev->asic_type != CHIP_SIENNA_CICHLID) - return 1; - else - return 0; + return adev->asic_type == CHIP_VEGA10 || + adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS || + adev->asic_type == CHIP_SIENNA_CICHLID; }
/* @@ -2006,7 +2003,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, *supported = 0;
if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || - amdgpu_ras_check_asic_type(adev)) + !amdgpu_ras_asic_supported(adev)) return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e7678ba8fdcf..16bfb36c27e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -580,16 +580,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) { - uint64_t bp_block_offset = 0; - uint32_t bp_block_size = 0; - struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; - adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0;
if (adev->mman.fw_vram_usage_va != NULL) { - adev->virt.vf2pf_update_interval_ms = 2000; + /* go through this logic in ip_init and reset to init workqueue*/ + amdgpu_virt_exchange_data(adev); + + INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); + schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); + } else if (adev->bios != NULL) { + /* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/ + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + } +} + + +void amdgpu_virt_exchange_data(struct amdgpu_device *adev) +{ + uint64_t bp_block_offset = 0; + uint32_t bp_block_size = 0; + struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; + + if (adev->mman.fw_vram_usage_va != NULL) {
adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) @@ -616,13 +634,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); } } - - if (adev->virt.vf2pf_update_interval_ms != 0) { - INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); - schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); - } }
+ void amdgpu_detect_virtualization(struct amdgpu_device *adev) { uint32_t reg; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 8dd624c20f89..77b9d37bfa1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -271,6 +271,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); +void amdgpu_virt_exchange_data(struct amdgpu_device *adev); void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); void amdgpu_detect_virtualization(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 1f2e2460e121..a1a8e026b9fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1475,6 +1475,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) { + ring = &adev->sdma.instance[i].ring; + adev->nbio.funcs->sdma_doorbell_range(adev, i, + ring->use_doorbell, ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range); + /* unhalt engine */ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL); temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 7212b9900e0a..abd649285a22 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1332,25 +1332,6 @@ static int soc15_common_sw_fini(void *handle) return 0; }
-static void soc15_doorbell_range_init(struct amdgpu_device *adev) -{ - int i; - struct amdgpu_ring *ring; - - /* sdma/ih doorbell range are programed by hypervisor */ - if (!amdgpu_sriov_vf(adev)) { - for (i = 0; i < adev->sdma.num_instances; i++) { - ring = &adev->sdma.instance[i].ring; - adev->nbio.funcs->sdma_doorbell_range(adev, i, - ring->use_doorbell, ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); - } - - adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, - adev->irq.ih.doorbell_index); - } -} - static int soc15_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1370,12 +1351,6 @@ static int soc15_common_hw_init(void *handle)
/* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); - /* HW doorbell routing policy: doorbell writing not - * in SDMA/IH/MM/ACV range will be routed to CP. So - * we need to init SDMA/IH/MM/ACV doorbell range prior - * to CP ip block init and ring test. - */ - soc15_doorbell_range_init(adev);
return 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 2663f1b31842..e427f4ffa080 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -6653,8 +6653,7 @@ static double CalculateUrgentLatency( return ret; }
- -static void UseMinimumDCFCLK( +static noinline_for_stack void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, int MaxInterDCNTileRepeaters, int MaxPrefetchMode, diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 09bc2c249e1a..3c4390d71a82 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1524,6 +1524,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, struct fixed31_32 lut2; struct fixed31_32 delta_lut; struct fixed31_32 delta_index; + const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0; /* fixed_pt library has problems handling too small values */ @@ -1552,6 +1553,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num, } else hw_x = coordinates_x[i].x;
+ if (dc_fixpt_le(one, hw_x)) + hw_x = one; + norm_x = dc_fixpt_mul(norm_factor, hw_x); index = dc_fixpt_floor(norm_x); if (index < 0 || index > 255) diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 3df6d6e850f5..70148ae16f14 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -529,15 +529,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc, WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event; + spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); if (ret) { - gma_crtc->page_flip_event = NULL; - drm_crtc_vblank_put(crtc); + spin_lock_irqsave(&dev->event_lock, flags); + if (gma_crtc->page_flip_event) { + gma_crtc->page_flip_event = NULL; + drm_crtc_vblank_put(crtc); + } + spin_unlock_irqrestore(&dev->event_lock, flags); } - - spin_unlock_irqrestore(&dev->event_lock, flags); } else { ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); } diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 43943e980203..4e41c144a290 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_HISI_HIBMC tristate "DRM Support for Hisilicon Hibmc" - depends on DRM && PCI && ARM64 + depends on DRM && PCI && (ARM64 || COMPILE_TEST) + depends on MMU select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 7d37d2a01e3c..146c4d04f572 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -668,6 +668,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) if (--dsi->refcount != 0) return;
+ /* + * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since + * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), + * which needs irq for vblank, and mtk_dsi_stop() will disable irq. + * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), + * after dsi is fully set. + */ + mtk_dsi_stop(dsi); + + mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); mtk_dsi_reset_engine(dsi); mtk_dsi_lane0_ulp_mode_enter(dsi); mtk_dsi_clk_ulp_mode_enter(dsi); @@ -718,17 +728,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) if (!dsi->enabled) return;
- /* - * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since - * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), - * which needs irq for vblank, and mtk_dsi_stop() will disable irq. - * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), - * after dsi is fully set. - */ - mtk_dsi_stop(dsi); - - mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); - dsi->enabled = false; }
@@ -791,10 +790,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = { .attach = mtk_dsi_bridge_attach, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_disable = mtk_dsi_bridge_atomic_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_enable = mtk_dsi_bridge_atomic_enable, .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable, .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, .mode_set = mtk_dsi_bridge_mode_set, };
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index bf2c845ef3a2..b7b37082a9d7 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2201,7 +2201,7 @@ static const struct panel_desc innolux_g121i1_l01 = { .enable = 200, .disable = 20, }, - .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, .connector_type = DRM_MODE_CONNECTOR_LVDS, };
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index dec54c70e008..857c47c69ef1 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -276,8 +276,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector) return ret; }
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +cdn_dp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct cdn_dp_device *dp = connector_to_dp(connector); struct drm_display_info *display_info = &dp->connector.display_info; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 5d820037e291..514279dac7cb 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2251,7 +2251,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, bool fb_overlap_ok) { struct resource *iter, *shadow; - resource_size_t range_min, range_max, start; + resource_size_t range_min, range_max, start, end; const char *dev_n = dev_name(&device_obj->device); int retval;
@@ -2286,6 +2286,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, range_max = iter->end; start = (range_min + align - 1) & ~(align - 1); for (; start + size - 1 <= range_max; start += align) { + end = start + size - 1; + + /* Skip the whole fb_mmio region if not fb_overlap_ok */ + if (!fb_overlap_ok && fb_mmio && + (((start >= fb_mmio->start) && (start <= fb_mmio->end)) || + ((end >= fb_mmio->start) && (end <= fb_mmio->end)))) + continue; + shadow = __request_region(iter, start, size, NULL, IORESOURCE_BUSY); if (!shadow) diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index d3719df1c40d..be4ad516293b 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1289,7 +1289,7 @@ static int i2c_imx_remove(struct platform_device *pdev) if (i2c_imx->dma) i2c_imx_dma_free(i2c_imx);
- if (ret == 0) { + if (ret >= 0) { /* setup chip registers to defaults */ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index ab261d762dea..bea82a787b4f 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -6,6 +6,7 @@ */
#include <linux/acpi.h> +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> @@ -63,13 +64,14 @@ */ #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000) /* Reference clock for Bluefield - 156 MHz. */ -#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000) +#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
/* Constant used to determine the PLL frequency. */ -#define MLNXBF_I2C_COREPLL_CONST 16384 +#define MLNXBF_I2C_COREPLL_CONST 16384ULL + +#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
/* PLL registers. */ -#define MLXBF_I2C_CORE_PLL_REG0 0x0 #define MLXBF_I2C_CORE_PLL_REG1 0x4 #define MLXBF_I2C_CORE_PLL_REG2 0x8
@@ -187,22 +189,15 @@ enum { #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
/* Core PLL TYU configuration. */ -#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0) -#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0) -#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0) - -#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3 -#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16 -#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20 +#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3) +#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16) +#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
/* Core PLL YU configuration. */ #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0) #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0) -#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0) +#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0 -#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1 -#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
/* Core PLL frequency. */ static u64 mlxbf_i2c_corepll_frequency; @@ -485,8 +480,6 @@ static struct mutex mlxbf_i2c_bus_lock; #define MLXBF_I2C_MASK_8 GENMASK(7, 0) #define MLXBF_I2C_MASK_16 GENMASK(15, 0)
-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000 - /* * Function to poll a set of bits at a specific address; it checks whether * the bits are equal to zero when eq_zero is set to 'true', and not equal @@ -675,7 +668,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave, /* Clear status bits. */ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS); /* Set the cause data. */ - writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR); + writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); /* Zero PEC byte. */ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC); /* Zero byte count. */ @@ -744,6 +737,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, if (flags & MLXBF_I2C_F_WRITE) { write_en = 1; write_len += operation->length; + if (data_idx + operation->length > + MLXBF_I2C_MASTER_DATA_DESC_SIZE) + return -ENOBUFS; memcpy(data_desc + data_idx, operation->buffer, operation->length); data_idx += operation->length; @@ -1413,24 +1409,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev, return 0; }
-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) +static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) { - u64 core_frequency, pad_frequency; + u64 core_frequency; u8 core_od, core_r; u32 corepll_val; u16 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
/* Get Core PLL configuration bits. */ - core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_F_TYU_MASK; - core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK; - core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_R_TYU_MASK; + core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val); + core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val); + core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
/* * Compute PLL output frequency as follow: @@ -1442,31 +1433,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. */ - core_frequency = pad_frequency * (++core_f); + core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f); core_frequency /= (++core_r) * (++core_od);
return core_frequency; }
-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) +static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) { u32 corepll_reg1_val, corepll_reg2_val; - u64 corepll_frequency, pad_frequency; + u64 corepll_frequency; u8 core_od, core_r; u32 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
/* Get Core PLL configuration bits */ - core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_F_YU_MASK; - core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_R_YU_MASK; - core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_OD_YU_MASK; + core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val); + core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val); + core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
/* * Compute PLL output frequency as follow: @@ -1478,7 +1464,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. */ - corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST; + corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST; corepll_frequency /= (++core_r) * (++core_od);
return corepll_frequency; @@ -2186,14 +2172,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = { [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1], [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1] }, - .calculate_freq = mlxbf_calculate_freq_from_tyu + .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu }, [MLXBF_I2C_CHIP_TYPE_2] = { .type = MLXBF_I2C_CHIP_TYPE_2, .shared_res = { [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2] }, - .calculate_freq = mlxbf_calculate_freq_from_yu + .calculate_freq = mlxbf_i2c_calculate_freq_from_yu } };
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index f6fae64861ce..27cc5f03611c 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -20,13 +20,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node) { size_t i; struct qcom_icc_node *qn; + struct qcom_icc_provider *qp;
qn = node->data; + qp = to_qcom_provider(node->provider);
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) { qn->sum_avg[i] = 0; qn->max_peak[i] = 0; } + + for (i = 0; i < qn->num_bcms; i++) + qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); } EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
@@ -44,10 +49,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, { size_t i; struct qcom_icc_node *qn; - struct qcom_icc_provider *qp;
qn = node->data; - qp = to_qcom_provider(node->provider);
if (!tag) tag = QCOM_ICC_TAG_ALWAYS; @@ -67,9 +70,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, *agg_avg += avg_bw; *agg_peak = max_t(u32, *agg_peak, peak_bw);
- for (i = 0; i < qn->num_bcms; i++) - qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); - return 0; } EXPORT_SYMBOL_GPL(qcom_icc_aggregate); diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c index c76b2c7f9b10..b936196c229c 100644 --- a/drivers/interconnect/qcom/sm8150.c +++ b/drivers/interconnect/qcom/sm8150.c @@ -627,7 +627,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8150", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c index cc558fec74e3..40820043c8d3 100644 --- a/drivers/interconnect/qcom/sm8250.c +++ b/drivers/interconnect/qcom/sm8250.c @@ -643,7 +643,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8250", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 93c60712a948..c48cf737b521 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -569,7 +569,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) { unsigned long fl_sagaw, sl_sagaw;
- fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0); + fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0); sl_sagaw = cap_sagaw(iommu->cap);
/* Second level only. */ diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index a2563c254080..2299d5cca8ff 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -512,7 +512,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; - if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc)) + if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc)) return -ENODEV;
switch (fc_usb->udev->speed) { diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 0b09cdaaeb6c..899768ed1688 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -932,15 +932,16 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/* Erase init depends on CSD and SSR */ mmc_init_erase(card); - - /* - * Fetch switch information from card. - */ - err = mmc_read_switch(card); - if (err) - return err; }
+ /* + * Fetch switch information from card. Note, sd3_bus_mode can change if + * voltage switch outcome changes, so do this always. + */ + err = mmc_read_switch(card); + if (err) + return err; + /* * For SPI, enable CRC as appropriate. * This CRC enable is located AFTER the reading of the @@ -1089,26 +1090,15 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) && mmc_sd_card_using_v18(card) && host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) { - /* - * Re-read switch information in case it has changed since - * oldcard was initialized. - */ - if (oldcard) { - err = mmc_read_switch(card); - if (err) - goto free_card; - } - if (mmc_sd_card_using_v18(card)) { - if (mmc_host_set_uhs_voltage(host) || - mmc_sd_init_uhs_card(card)) { - v18_fixup_failed = true; - mmc_power_cycle(host, ocr); - if (!oldcard) - mmc_remove_card(card); - goto retry; - } - goto cont; + if (mmc_host_set_uhs_voltage(host) || + mmc_sd_init_uhs_card(card)) { + v18_fixup_failed = true; + mmc_power_cycle(host, ocr); + if (!oldcard) + mmc_remove_card(card); + goto retry; } + goto cont; }
/* Initialization sequence for UHS-I cards */ diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index b0f8d551b61d..acb6ff0be5ff 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -85,8 +85,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = { static u16 ad_ticks_per_sec; static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = - MULTICAST_LACPDU_ADDR; +const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = { + 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 +};
/* ================= main 802.3ad protocol functions ================== */ static int ad_lacpdu_send(struct port *port); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9c4b45341fd2..f38a6ce5749b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -827,12 +827,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev, dev_uc_unsync(slave_dev, bond_dev); dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* del lacpdu mc addr from mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_del(slave_dev, lacpdu_multicast); - } + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_del(slave_dev, lacpdu_mcast_addr); }
/*--------------------------- Active slave change ---------------------------*/ @@ -852,7 +848,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev); + if (bond->dev->flags & IFF_UP) + bond_hw_addr_flush(bond->dev, old_active->dev); }
if (new_active) { @@ -863,10 +860,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev); - dev_uc_sync(new_active->dev, bond->dev); - dev_mc_sync(new_active->dev, bond->dev); - netif_addr_unlock_bh(bond->dev); + if (bond->dev->flags & IFF_UP) { + netif_addr_lock_bh(bond->dev); + dev_uc_sync(new_active->dev, bond->dev); + dev_mc_sync(new_active->dev, bond->dev); + netif_addr_unlock_bh(bond->dev); + } } }
@@ -2073,16 +2072,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, } }
- netif_addr_lock_bh(bond_dev); - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - netif_addr_unlock_bh(bond_dev); + if (bond_dev->flags & IFF_UP) { + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_add(slave_dev, lacpdu_multicast); + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_add(slave_dev, lacpdu_mcast_addr); } }
@@ -2310,7 +2307,8 @@ static int __bond_release_one(struct net_device *bond_dev, if (old_flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev); + if (old_flags & IFF_UP) + bond_hw_addr_flush(bond_dev, slave_dev); }
slave_disable_netpoll(slave); @@ -3772,6 +3770,9 @@ static int bond_open(struct net_device *bond_dev) /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; bond_3ad_initiate_agg_selection(bond, 1); + + bond_for_each_slave(bond, slave, iter) + dev_mc_add(slave->dev, lacpdu_mcast_addr); }
if (bond_mode_can_use_xmit_hash(bond)) @@ -3783,6 +3784,7 @@ static int bond_open(struct net_device *bond_dev) static int bond_close(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave;
bond_work_cancel_all(bond); bond->send_peer_notif = 0; @@ -3790,6 +3792,19 @@ static int bond_close(struct net_device *bond_dev) bond_alb_deinitialize(bond); bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) { + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (slave) + bond_hw_addr_flush(bond_dev, slave->dev); + rcu_read_unlock(); + } else { + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) + bond_hw_addr_flush(bond_dev, slave->dev); + } + return 0; }
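For reference, the bonding changes above hinge on two details: the LACPDU destination 01:80:C2:00:00:02 is a link-local multicast group that every 802.3ad slave must join, and unicast/multicast sync is now only done while the bond is IFF_UP (bond_open adds the group to each slave, bond_close flushes it again). A minimal user-space C sketch of the address and the multicast test, purely for illustration (the helper name mirrors, but is not, the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* 802.3ad Slow Protocols destination address used for LACPDUs. */
static const uint8_t lacpdu_mcast_addr[ETH_ALEN] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
};

/* An Ethernet address is multicast if the I/G bit (LSB of byte 0) is set. */
static bool addr_is_multicast(const uint8_t *addr)
{
	return addr[0] & 0x01;
}

int main(void)
{
	printf("LACPDU group %02x:%02x:%02x:%02x:%02x:%02x, multicast=%d\n",
	       lacpdu_mcast_addr[0], lacpdu_mcast_addr[1], lacpdu_mcast_addr[2],
	       lacpdu_mcast_addr[3], lacpdu_mcast_addr[4], lacpdu_mcast_addr[5],
	       addr_is_multicast(lacpdu_mcast_addr));
	return 0;
}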
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 7cbaac238ff6..429950241de3 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -954,11 +954,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, u32 reg_ctrl, reg_id, reg_iflag1; int i;
-	if (unlikely(drop)) {
-		skb = ERR_PTR(-ENOBUFS);
-		goto mark_as_read;
-	}
-
 	mb = flexcan_get_mb(priv, n);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { @@ -987,6 +982,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, reg_ctrl = priv->read(&mb->can_ctrl); }
+ if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + if (reg_ctrl & FLEXCAN_MB_CNT_EDL) skb = alloc_canfd_skb(offload->dev, &cfd); else diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 1bfc497da9ac..a879200eaab0 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -678,6 +678,7 @@ static int gs_can_open(struct net_device *netdev) flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */ + dev->can.state = CAN_STATE_ERROR_ACTIVE; dm->mode = cpu_to_le32(GS_CAN_MODE_START); dm->flags = cpu_to_le32(flags); rc = usb_control_msg(interface_to_usbdev(dev->iface), @@ -694,13 +695,12 @@ static int gs_can_open(struct net_device *netdev) if (rc < 0) { netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); kfree(dm); + dev->can.state = CAN_STATE_STOPPED; return rc; }
kfree(dm);
- dev->can.state = CAN_STATE_ERROR_ACTIVE; - parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 15aa3b3c0089..4af253825957 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1671,29 +1671,6 @@ static int enetc_set_rss(struct net_device *ndev, int en) return 0; }
-static int enetc_set_psfp(struct net_device *ndev, int en) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - int err; - - if (en) { - err = enetc_psfp_enable(priv); - if (err) - return err; - - priv->active_offloads |= ENETC_F_QCI; - return 0; - } - - err = enetc_psfp_disable(priv); - if (err) - return err; - - priv->active_offloads &= ~ENETC_F_QCI; - - return 0; -} - static void enetc_enable_rxvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -1712,11 +1689,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en) enetc_bdr_enable_txvlan(&priv->si->hw, i, en); }
-int enetc_set_features(struct net_device *ndev, - netdev_features_t features) +void enetc_set_features(struct net_device *ndev, netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; - int err = 0;
if (changed & NETIF_F_RXHASH) enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); @@ -1728,11 +1703,6 @@ int enetc_set_features(struct net_device *ndev, if (changed & NETIF_F_HW_VLAN_CTAG_TX) enetc_enable_txvlan(ndev, !!(features & NETIF_F_HW_VLAN_CTAG_TX)); - - if (changed & NETIF_F_HW_TC) - err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); - - return err; }
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 15d19cbd5a95..00386c5d3cde 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -301,8 +301,7 @@ void enetc_start(struct net_device *ndev); void enetc_stop(struct net_device *ndev); netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); struct net_device_stats *enetc_get_stats(struct net_device *ndev); -int enetc_set_features(struct net_device *ndev, - netdev_features_t features); +void enetc_set_features(struct net_device *ndev, netdev_features_t features); int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data); @@ -335,6 +334,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data); int enetc_psfp_init(struct enetc_ndev_priv *priv); int enetc_psfp_clean(struct enetc_ndev_priv *priv); +int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv) { @@ -410,4 +410,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) { return 0; } + +static inline int enetc_set_psfp(struct net_device *ndev, bool en) +{ + return 0; +} #endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 716b396bf094..6904e10dd46b 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -671,6 +671,13 @@ static int enetc_pf_set_features(struct net_device *ndev, { netdev_features_t changed = ndev->features ^ features; struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (changed & NETIF_F_HW_TC) { + err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); + if (err) + return err; + }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { struct enetc_pf *pf = enetc_si_priv(priv->si); @@ -684,7 +691,9 @@ static int enetc_pf_set_features(struct net_device *ndev, if (changed & NETIF_F_LOOPBACK) enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features); + enetc_set_features(ndev, features); + + return 0; }
static const struct net_device_ops enetc_ndev_ops = { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 9e6988fd3787..62efe1aebf86 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1525,6 +1525,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } }
+int enetc_set_psfp(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (en) { + err = enetc_psfp_enable(priv); + if (err) + return err; + + priv->active_offloads |= ENETC_F_QCI; + return 0; + } + + err = enetc_psfp_disable(priv); + if (err) + return err; + + priv->active_offloads &= ~ENETC_F_QCI; + + return 0; +} + int enetc_psfp_init(struct enetc_ndev_priv *priv) { if (epsfp.psfp_sfi_bitmap) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 33c125735db7..5ce3e2593bdd 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -88,7 +88,9 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) static int enetc_vf_set_features(struct net_device *ndev, netdev_features_t features) { - return enetc_set_features(ndev, features); + enetc_set_features(ndev, features); + + return 0; }
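The enetc refactor above keeps the usual pattern of XOR-ing the old and new feature masks and acting only on the bits that actually changed, while the PSFP bit (the only one that can fail) is now handled in the PF-specific ndo before the void common helper runs. A minimal user-space sketch of that XOR-delta idiom; the flag values below are made up for illustration and do not correspond to real NETIF_F_* bits:

#include <stdint.h>
#include <stdio.h>

/* Made-up feature bits standing in for NETIF_F_* flags. */
#define F_RXHASH   (1u << 0)
#define F_VLAN_RX  (1u << 1)
#define F_HW_TC    (1u << 2)

static void set_features(uint32_t current, uint32_t wanted)
{
	uint32_t changed = current ^ wanted;	/* bits that differ */

	if (changed & F_RXHASH)
		printf("RX hashing %s\n", (wanted & F_RXHASH) ? "on" : "off");
	if (changed & F_VLAN_RX)
		printf("VLAN RX offload %s\n", (wanted & F_VLAN_RX) ? "on" : "off");
	if (changed & F_HW_TC)
		printf("hardware TC offload %s\n", (wanted & F_HW_TC) ? "on" : "off");
}

int main(void)
{
	set_features(F_RXHASH, F_RXHASH | F_HW_TC);	/* only F_HW_TC toggles */
	return 0;
}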
/* Probing/ Init */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 97009cbea779..c7f243ddbcf7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5733,6 +5733,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi) } }
+/** + * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits + * @vsi: Pointer to vsi structure + * @max_tx_rate: max TX rate in bytes to be converted into Mbits + * + * Helper function to convert units before send to set BW limit + **/ +static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) +{ + if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { + dev_warn(&vsi->back->pdev->dev, + "Setting max tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = I40E_BW_CREDIT_DIVISOR; + } else { + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); + } + + return max_tx_rate; +} + /** * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate * @vsi: VSI to be configured @@ -5755,10 +5775,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) max_tx_rate, seid); return -EINVAL; } - if (max_tx_rate && max_tx_rate < 50) { + if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { dev_warn(&pf->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); - max_tx_rate = 50; + max_tx_rate = I40E_BW_CREDIT_DIVISOR; }
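The new conversion helper relies on 1 Mbit/s being 125000 bytes/s, with anything below that clamped up to the 50 Mbit/s minimum the warning message describes. A stand-alone sketch of the same arithmetic; the constant values here are mirrored for illustration rather than taken from the shown hunks:

#include <stdint.h>
#include <stdio.h>

#define BW_MBPS_DIVISOR   125000ULL  /* bytes per second in 1 Mbit/s (mirrors I40E_BW_MBPS_DIVISOR) */
#define BW_MIN_MBPS       50ULL      /* minimum usable rate, in Mbit/s */

/* Convert a rate given in bytes/s to Mbit/s, clamping tiny values. */
static uint64_t bw_bytes_to_mbits(uint64_t bytes_per_sec)
{
	if (bytes_per_sec < BW_MBPS_DIVISOR)
		return BW_MIN_MBPS;
	return bytes_per_sec / BW_MBPS_DIVISOR;
}

int main(void)
{
	/* 12500000 bytes/s == 100 Mbit/s */
	printf("%llu Mbit/s\n", (unsigned long long)bw_bytes_to_mbits(12500000ULL));
	return 0;
}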
/* Tx rate credits are in values of 50Mbps, 0 is disabled */ @@ -7719,9 +7739,9 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
if (pf->flags & I40E_FLAG_TC_MQPRIO) { if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { u64 credits = max_tx_rate; @@ -10366,10 +10386,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) }
if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto end_unlock; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1947c5a77550..ffff7de801af 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1985,6 +1985,25 @@ static void i40e_del_qch(struct i40e_vf *vf) } }
+/** + * i40e_vc_get_max_frame_size + * @vf: pointer to the VF + * + * Max frame size is determined based on the current port's max frame size and + * whether a port VLAN is configured on this VF. The VF is not aware whether + * it's in a port VLAN so the PF needs to account for this in max frame size + * checks and sending the max frame size to the VF. + **/ +static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) +{ + u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; + + if (vf->port_vlan_id) + max_frame_size -= VLAN_HLEN; + + return max_frame_size; +} + /** * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info @@ -2085,6 +2104,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; + vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
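The helper above accounts for the 4-byte 802.1Q tag that the PF inserts when a port VLAN is configured, which the VF cannot see; the VF's usable maximum frame is therefore the port maximum minus VLAN_HLEN. A quick illustration of that accounting with standard Ethernet sizes (the port value below is only an example):

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN   14  /* dst + src + ethertype */
#define ETH_FCS    4
#define VLAN_HLEN  4   /* 802.1Q tag inserted by the PF for a port VLAN */

int main(void)
{
	uint16_t port_max_frame = 9728;	/* example port setting, not from the patch */
	int has_port_vlan = 1;

	uint16_t vf_max_frame = port_max_frame - (has_port_vlan ? VLAN_HLEN : 0);

	printf("VF max frame: %u bytes (max MTU roughly %u)\n",
	       vf_max_frame, vf_max_frame - ETH_HLEN - ETH_FCS);
	return 0;
}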
if (vf->lan_vsi_idx) { vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 99983f7a0ce0..d481a922f018 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail;
+ /* underlying hardware might not allow access and/or always return + * 0 for the head/tail registers so just use the cached values + */ head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use;
if (head != tail) return (head < tail) ? @@ -1368,7 +1371,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, #endif struct sk_buff *skb;
- if (!rx_buffer) + if (!rx_buffer || !size) return NULL; /* prefetch first cache line of first page */ va = page_address(rx_buffer->page) + rx_buffer->page_offset; @@ -1526,7 +1529,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; - if (rx_buffer) + if (rx_buffer && size) rx_buffer->pagecnt_bias++; break; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index ff479bf72144..5deee75bc436 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -241,11 +241,14 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; - struct virtchnl_queue_pair_info *vqpi; + int i, max_frame = adapter->vf_res->max_mtu; int pairs = adapter->num_active_queues; - int i, max_frame = IAVF_MAX_RXBUFFER; + struct virtchnl_queue_pair_info *vqpi; size_t len;
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) + max_frame = IAVF_MAX_RXBUFFER; + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index d0f1b2dc7dff..c49168ba7a4d 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -308,7 +308,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); efx->n_rx_channels = 1; efx->n_tx_channels = 1; - efx->tx_channel_offset = 1; + efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; efx->legacy_irq = efx->pci_dev->irq; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 1665529a7271..fcc7de8ae2bf 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -545,7 +545,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, * previous packets out. */ if (!netdev_xmit_more()) - efx_tx_send_pending(tx_queue->channel); + efx_tx_send_pending(efx_get_tx_channel(efx, index)); return NETDEV_TX_OK; }
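Returning to the iavf_get_tx_pending() hunk a little further up: it now computes the number of in-flight descriptors purely from the cached next_to_clean/next_to_use indices, since the comment notes the hardware may not allow access to the head/tail registers. The count on a ring of a given size follows the usual circular arithmetic, sketched here as a self-contained C example:

#include <stdio.h>

/* Descriptors still outstanding on a circular ring of 'count' slots. */
static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail - head + count;
}

int main(void)
{
	/* tail wrapped past the end of a 512-entry ring, head has not yet */
	printf("%u\n", tx_pending(500, 10, 512));	/* prints 22 */
	return 0;
}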
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 69fc47089e62..940db4ec5714 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2063,9 +2063,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb_reserve(copy_skb, 2); skb_put(copy_skb, len); - dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); - dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); /* Reuse original ring buffer. */ hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index fe91b72eca36..64b12e462765 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1251,20 +1251,18 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) /* Initialize a ring, including allocating DMA memory for its entries */ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) { - size_t size = count * GSI_RING_ELEMENT_SIZE; + u32 size = count * GSI_RING_ELEMENT_SIZE; struct device *dev = gsi->dev; dma_addr_t addr;
- /* Hardware requires a 2^n ring size, with alignment equal to size */ + /* Hardware requires a 2^n ring size, with alignment equal to size. + * The DMA address returned by dma_alloc_coherent() is guaranteed to + * be a power-of-2 number of pages, which satisfies the requirement. + */ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); - if (ring->virt && addr % size) { - dma_free_coherent(dev, size, ring->virt, addr); - dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", - size); - return -EINVAL; /* Not a good error value, but distinct */ - } else if (!ring->virt) { + if (!ring->virt) return -ENOMEM; - } + ring->addr = addr; ring->count = count;
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h index 1785c9d3344d..d58dce46e061 100644 --- a/drivers/net/ipa/gsi_private.h +++ b/drivers/net/ipa/gsi_private.h @@ -14,7 +14,7 @@ struct gsi_trans; struct gsi_ring; struct gsi_channel;
-#define GSI_RING_ELEMENT_SIZE	16	/* bytes */
+#define GSI_RING_ELEMENT_SIZE	16	/* bytes; must be a power of 2 */
/* Return the entry that follows one provided in a transaction pool */ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element); diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 6c3ed5b17b80..70c2b585f98d 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -153,11 +153,10 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, size = __roundup_pow_of_two(size); total_size = (count + max_alloc - 1) * size;
- /* The allocator will give us a power-of-2 number of pages. But we - * can't guarantee that, so request it. That way we won't waste any - * memory that would be available beyond the required space. - * - * Note that gsi_trans_pool_exit_dma() assumes the total allocated + /* The allocator will give us a power-of-2 number of pages + * sufficient to satisfy our request. Round up our requested + * size to avoid any unused space in the allocation. This way + * gsi_trans_pool_exit_dma() can assume the total allocated * size is exactly (count * size). */ total_size = get_order(total_size) << PAGE_SHIFT; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index a47378b7d9b2..dc94ce035655 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -154,7 +154,7 @@ static void ipa_cmd_validate_build(void) * of entries, as and IPv4 and IPv6 route tables have the same number * of entries. */ -#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE) +#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64)) #define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX) BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK)); BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK)); diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index 7fc1058a5ca9..ba05e26c3c60 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -72,8 +72,8 @@ * that can be included in a single transaction. */ struct gsi_channel_data { - u16 tre_count; - u16 event_count; + u16 tre_count; /* must be a power of 2 */ + u16 event_count; /* must be a power of 2 */ u8 tlv_count; };
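Both GSI changes above lean on the same property: dma_alloc_coherent() hands back a power-of-2 number of pages, so rounding the request up to that granularity satisfies the hardware's 2^n size/alignment rule and lets the free path recompute the exact allocation size. A user-space sketch of the rounding, assuming 4 KiB pages and mimicking what get_order() << PAGE_SHIFT does in the kernel:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages for this sketch */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest power-of-2 number of pages that covers 'size' bytes. */
static unsigned long round_to_pow2_pages(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long order = 0;

	while ((1UL << order) < pages)
		order++;
	return (1UL << order) << PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", round_to_pow2_pages(3 * PAGE_SIZE + 100));	/* 4 pages */
	return 0;
}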
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 1a87a49538c5..880ec353f958 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) mem = &ipa->mem[IPA_MEM_V4_ROUTE]; req.v4_route_tbl_info_valid = 1; req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE; + req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = &ipa->mem[IPA_MEM_V6_ROUTE]; req.v6_route_tbl_info_valid = 1; req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE; + req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = &ipa->mem[IPA_MEM_V4_FILTER]; req.v4_filter_tbl_start_valid = 1; @@ -352,8 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v4_hash_route_tbl_info_valid = 1; req.v4_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_hash_route_tbl_info.count = - mem->size / IPA_TABLE_ENTRY_SIZE; + req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; }
mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]; @@ -361,8 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v6_hash_route_tbl_info_valid = 1; req.v6_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_hash_route_tbl_info.count = - mem->size / IPA_TABLE_ENTRY_SIZE; + req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; }
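The QMI change above switches the routing table fields from a (start, count) array descriptor to a (start, end) bounds structure, where end is the last valid index rather than a size, hence IPA_ROUTE_MODEM_COUNT - 1. A tiny illustration of filling such a bounds structure; the struct and field names below just mirror the idea and are not the QMI wire encoding:

#include <stdint.h>
#include <stdio.h>

#define ROUTE_MODEM_COUNT 8	/* route table slots owned by the modem */

struct mem_bounds {			/* illustrative layout only */
	uint32_t start;	/* byte offset of the table in shared memory */
	uint32_t end;	/* last valid index (inclusive), not a byte size */
};

int main(void)
{
	struct mem_bounds v4_route = {
		.start = 0x0318,			/* example offset */
		.end   = ROUTE_MODEM_COUNT - 1,		/* indices 0..7 */
	};

	printf("start=0x%x end=%u (that is %u entries)\n",
	       v4_route.start, v4_route.end, v4_route.end + 1);
	return 0;
}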
mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED]; diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 73413371e3d3..ecf9f863c842 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -271,7 +271,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x12, .offset = offsetof(struct ipa_init_modem_driver_req, v4_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -292,7 +292,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x13, .offset = offsetof(struct ipa_init_modem_driver_req, v6_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -456,7 +456,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x1b, .offset = offsetof(struct ipa_init_modem_driver_req, v4_hash_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -477,7 +477,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x1c, .offset = offsetof(struct ipa_init_modem_driver_req, v6_hash_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index cfac456cea0c..58de425bb8e6 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -82,9 +82,11 @@ enum ipa_platform_type { IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */ };
-/* This defines the start and end offset of a range of memory. Both - * fields are offsets relative to the start of IPA shared memory. - * The end value is the last addressable byte *within* the range. +/* This defines the start and end offset of a range of memory. The start + * value is a byte offset relative to the start of IPA shared memory. The + * end value is the last addressable unit *within* the range. Typically + * the end value is in units of bytes, however it can also be a maximum + * array index value. */ struct ipa_mem_bounds { u32 start; @@ -125,18 +127,19 @@ struct ipa_init_modem_driver_req { u8 hdr_tbl_info_valid; struct ipa_mem_bounds hdr_tbl_info;
- /* Routing table information. These define the location and size of - * non-hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + /* Routing table information. These define the location and maximum + * *index* (not byte) for the modem portion of non-hashable IPv4 and + * IPv6 routing tables. The start values are byte offsets relative + * to the start of IPA shared memory. */ u8 v4_route_tbl_info_valid; - struct ipa_mem_array v4_route_tbl_info; + struct ipa_mem_bounds v4_route_tbl_info; u8 v6_route_tbl_info_valid; - struct ipa_mem_array v6_route_tbl_info; + struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the * non-hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + * byte offsets relative to the start of IPA shared memory. */ u8 v4_filter_tbl_start_valid; u32 v4_filter_tbl_start; @@ -177,18 +180,20 @@ struct ipa_init_modem_driver_req { u8 zip_tbl_info_valid; struct ipa_mem_bounds zip_tbl_info;
- /* Routing table information. These define the location and size - * of hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + /* Routing table information. These define the location and maximum + * *index* (not byte) for the modem portion of hashable IPv4 and IPv6 + * routing tables (if supported by hardware). The start values are + * byte offsets relative to the start of IPA shared memory. */ u8 v4_hash_route_tbl_info_valid; - struct ipa_mem_array v4_hash_route_tbl_info; + struct ipa_mem_bounds v4_hash_route_tbl_info; u8 v6_hash_route_tbl_info_valid; - struct ipa_mem_array v6_hash_route_tbl_info; + struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size - * of hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + * of hashable IPv4 and IPv6 filter tables (if supported by hardware). + * The start values are byte offsets relative to the start of IPA + * shared memory. */ u8 v4_hash_filter_tbl_start_valid; u32 v4_hash_filter_tbl_start; diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 0747866d60ab..02c192837414 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -27,28 +27,38 @@ /** * DOC: IPA Filter and Route Tables * - * The IPA has tables defined in its local shared memory that define filter - * and routing rules. Each entry in these tables contains a 64-bit DMA - * address that refers to DRAM (system memory) containing a rule definition. + * The IPA has tables defined in its local (IPA-resident) memory that define + * filter and routing rules. An entry in either of these tables is a little + * endian 64-bit "slot" that holds the address of a rule definition. (The + * size of these slots is 64 bits regardless of the host DMA address size.) + * + * Separate tables (both filter and route) used for IPv4 and IPv6. There + * are normally another set of "hashed" filter and route tables, which are + * used with a hash of message metadata. Hashed operation is not supported + * by all IPA hardware (IPA v4.2 doesn't support hashed tables). + * + * Rules can be in local memory or in DRAM (system memory). The offset of + * an object (such as a route or filter table) in IPA-resident memory must + * 128-byte aligned. An object in system memory (such as a route or filter + * rule) must be at an 8-byte aligned address. We currently only place + * route or filter rules in system memory. + * * A rule consists of a contiguous block of 32-bit values terminated with * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits * represents "no filtering" or "no routing," and is the reset value for - * filter or route table rules. Separate tables (both filter and route) - * used for IPv4 and IPv6. Additionally, there can be hashed filter or - * route tables, which are used when a hash of message metadata matches. - * Hashed operation is not supported by all IPA hardware. + * filter or route table rules. * * Each filter rule is associated with an AP or modem TX endpoint, though - * not all TX endpoints support filtering. The first 64-bit entry in a + * not all TX endpoints support filtering. The first 64-bit slot in a * filter table is a bitmap indicating which endpoints have entries in * the table. The low-order bit (bit 0) in this bitmap represents a * special global filter, which applies to all traffic. This is not * used in the current code. Bit 1, if set, indicates that there is an - * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the - * table. Bit 2, if set, indicates there is an entry for endpoint 1, - * and so on. Space is set aside in IPA local memory to hold as many - * filter table entries as might be required, but typically they are not - * all used. + * entry (i.e. slot containing a system address referring to a rule) for + * endpoint 0 in the table. Bit 3, if set, indicates there is an entry + * for endpoint 2, and so on. Space is set aside in IPA local memory to + * hold as many filter table entries as might be required, but typically + * they are not all used. 
* * The AP initializes all entries in a filter table to refer to a "zero" * entry. Once initialized the modem and AP update the entries for @@ -96,13 +106,8 @@ * ---------------------- */
-/* IPA hardware constrains filter and route tables alignment */ -#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */ - /* Assignment of route table entries to the modem and AP */ #define IPA_ROUTE_MODEM_MIN 0 -#define IPA_ROUTE_MODEM_COUNT 8 - #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT #define IPA_ROUTE_AP_COUNT \ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT) @@ -118,21 +123,14 @@ /* Check things that can be validated at build time. */ static void ipa_table_validate_build(void) { - /* IPA hardware accesses memory 128 bytes at a time. Addresses - * referred to by entries in filter and route tables must be - * aligned on 128-byte byte boundaries. The only rule address - * ever use is the "zero rule", and it's aligned at the base - * of a coherent DMA allocation. - */ - BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN); - - /* Filter and route tables contain DMA addresses that refer to - * filter or route rules. We use a fixed constant to represent - * the size of either type of table entry. Code in ipa_table_init() - * uses a pointer to __le64 to initialize table entriews. + /* Filter and route tables contain DMA addresses that refer + * to filter or route rules. But the size of a table entry + * is 64 bits regardless of what the size of an AP DMA address + * is. A fixed constant defines the size of an entry, and + * code in ipa_table_init() uses a pointer to __le64 to + * initialize tables. */ - BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t)); - BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64)); + BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
/* A "zero rule" is used to represent no filtering or no routing. * It is a 64-bit block of zeroed memory. Code in ipa_table_init() @@ -163,7 +161,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed) else mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED] : &ipa->mem[IPA_MEM_V4_ROUTE]; - size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE; + size = IPA_ROUTE_COUNT_MAX * sizeof(__le64); } else { if (ipv6) mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED] @@ -171,7 +169,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed) else mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED] : &ipa->mem[IPA_MEM_V4_FILTER]; - size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE; + size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64); }
if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed)) @@ -270,8 +268,8 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter, if (filter) first++; /* skip over bitmap */
- offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE; - size = count * IPA_TABLE_ENTRY_SIZE; + offset = mem->offset + first * sizeof(__le64); + size = count * sizeof(__le64); addr = ipa_table_addr(ipa, false, count);
ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true); @@ -455,11 +453,11 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, count = 1 + hweight32(ipa->filter_map); hash_count = hash_mem->size ? count : 0; } else { - count = mem->size / IPA_TABLE_ENTRY_SIZE; - hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE; + count = mem->size / sizeof(__le64); + hash_count = hash_mem->size / sizeof(__le64); } - size = count * IPA_TABLE_ENTRY_SIZE; - hash_size = hash_count * IPA_TABLE_ENTRY_SIZE; + size = count * sizeof(__le64); + hash_size = hash_count * sizeof(__le64);
addr = ipa_table_addr(ipa, filter, count); hash_addr = ipa_table_addr(ipa, filter, hash_count); @@ -662,7 +660,13 @@ int ipa_table_init(struct ipa *ipa)
ipa_table_validate_build();
- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE; + /* The IPA hardware requires route and filter table rules to be + * aligned on a 128-byte boundary. We put the "zero rule" at the + * base of the table area allocated here. The DMA address returned + * by dma_alloc_coherent() is guaranteed to be a power-of-2 number + * of pages, which satisfies the rule alignment requirement. + */ + size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -694,7 +698,7 @@ void ipa_table_exit(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; size_t size;
- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE; + size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); ipa->table_addr = 0; diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 78038d14fcea..35e519cef25d 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -10,12 +10,12 @@
struct ipa;
-/* The size of a filter or route table entry */
-#define IPA_TABLE_ENTRY_SIZE	sizeof(__le64)	/* Holds a physical address */
-
 /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
 #define IPA_FILTER_COUNT_MAX	14
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT	8
+
 /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
 #define IPA_ROUTE_COUNT_MAX	15
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8801d093135c..a33149ee0ddc 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -496,7 +496,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
static int ipvlan_process_outbound(struct sk_buff *skb) { - struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive @@ -506,6 +505,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb) if (skb_mac_header_was_set(skb)) { /* In this mode we dont care about * multicast and broadcast traffic */ + struct ethhdr *ethh = eth_hdr(skb); + if (is_multicast_ether_addr(ethh->h_dest)) { pr_debug_ratelimited( "Dropped {multi|broad}cast of type=[%x]\n", @@ -590,7 +591,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) { const struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ethhdr *eth = eth_hdr(skb); + struct ethhdr *eth = skb_eth_hdr(skb); struct ipvl_addr *addr; void *lyr3h; int addr_type; @@ -620,6 +621,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); ipvlan_skb_crossing_ns(skb, NULL); ipvlan_multicast_enqueue(ipvlan->port, skb, true); return NET_XMIT_SUCCESS; diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index ea0bf13e8ac3..5bae47f3da40 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -332,6 +332,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) return 0;
unregister: + of_node_put(child); mdiobus_unregister(mdio); return rc; } diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 75a62d1cc737..7045595f8d7d 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -89,6 +89,9 @@ #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8) #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+#define VEND1_GLOBAL_GEN_STAT2 0xc831 +#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15) + #define VEND1_GLOBAL_RSVD_STAT1 0xc885 #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4) #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0) @@ -123,6 +126,12 @@ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1) #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+/* Sleep and timeout for checking if the Processor-Intensive + * MDIO operation is finished + */ +#define AQR107_OP_IN_PROG_SLEEP 1000 +#define AQR107_OP_IN_PROG_TIMEOUT 100000 + struct aqr107_hw_stat { const char *name; int reg; @@ -569,16 +578,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev) phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n"); }
+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev) +{ + int val, err; + + /* The datasheet notes to wait at least 1ms after issuing a + * processor intensive operation before checking. + * We cannot use the 'sleep_before_read' parameter of read_poll_timeout + * because that just determines the maximum time slept, not the minimum. + */ + usleep_range(1000, 5000); + + err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_GEN_STAT2, val, + !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG), + AQR107_OP_IN_PROG_SLEEP, + AQR107_OP_IN_PROG_TIMEOUT, false); + if (err) { + phydev_err(phydev, "timeout: processor-intensive MDIO operation\n"); + return err; + } + + return 0; +} + static int aqr107_suspend(struct phy_device *phydev) { - return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, - MDIO_CTRL1_LPOWER); + int err; + + err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); + if (err) + return err; + + return aqr107_wait_processor_intensive_op(phydev); }
static int aqr107_resume(struct phy_device *phydev) { - return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, - MDIO_CTRL1_LPOWER); + int err; + + err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); + if (err) + return err; + + return aqr107_wait_processor_intensive_op(phydev); }
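aqr107_wait_processor_intensive_op() above is the familiar poll-until-clear-with-timeout pattern: sleep a minimum settle time first, then poll a status bit at a fixed interval until it clears or an overall deadline expires. A user-space sketch of the same control flow, with the register read simulated and the 1 ms / 100 ms figures mirroring the constants in the patch:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_US  1000		/* sleep between polls */
#define POLL_TIMEOUT_US   100000	/* give up after this long */

/* Stand-in for reading the status register and testing the busy bit. */
static bool op_in_progress(void)
{
	static int polls;
	return ++polls < 5;	/* pretend the bit clears on the 5th read */
}

static int wait_for_completion_poll(void)
{
	unsigned int waited = 0;

	usleep(1000);	/* minimum settle time before the first check */

	while (op_in_progress()) {
		if (waited >= POLL_TIMEOUT_US)
			return -1;	/* timed out */
		usleep(POLL_INTERVAL_US);
		waited += POLL_INTERVAL_US;
	}
	return 0;
}

int main(void)
{
	printf("%s\n", wait_for_completion_poll() ? "timeout" : "done");
	return 0;
}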
static int aqr107_probe(struct phy_device *phydev) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 615f3776b4be..7117d559a32e 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1270,10 +1270,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev, } }
- netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); + if (dev->flags & IFF_UP) { + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + }
port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); @@ -1344,8 +1346,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev) netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); + if (dev->flags & IFF_UP) { + dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); + } dev_close(port_dev); team_port_leave(team, port);
@@ -1695,6 +1699,14 @@ static int team_open(struct net_device *dev)
static int team_close(struct net_device *dev) { + struct team *team = netdev_priv(dev); + struct team_port *port; + + list_for_each_entry(port, &team->port_list, list) { + dev_uc_unsync(port->dev, dev); + dev_mc_unsync(port->dev, dev); + } + return 0; }
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index d0f3b6d7f408..5c804bcabfe6 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs) if (attrs[WGPEER_A_ENDPOINT]) { struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); + struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) && - addr->sa_family == AF_INET) || - (len == sizeof(struct sockaddr_in6) && - addr->sa_family == AF_INET6)) { - struct endpoint endpoint = { { { 0 } } }; - - memcpy(&endpoint.addr, addr, len); + if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) { + endpoint.addr4 = *(struct sockaddr_in *)addr; + wg_socket_set_peer_endpoint(peer, &endpoint); + } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) { + endpoint.addr6 = *(struct sockaddr_in6 *)addr; wg_socket_set_peer_endpoint(peer, &endpoint); } } diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c index ba87d294604f..d4bb40a695ab 100644 --- a/drivers/net/wireguard/selftest/ratelimiter.c +++ b/drivers/net/wireguard/selftest/ratelimiter.c @@ -6,29 +6,28 @@ #ifdef DEBUG
#include <linux/jiffies.h> -#include <linux/hrtimer.h>
static const struct { bool result; - u64 nsec_to_sleep_before; + unsigned int msec_to_sleep_before; } expected_results[] __initconst = { [0 ... PACKETS_BURSTABLE - 1] = { true, 0 }, [PACKETS_BURSTABLE] = { false, 0 }, - [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND }, + [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, [PACKETS_BURSTABLE + 2] = { false, 0 }, - [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, + [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, [PACKETS_BURSTABLE + 4] = { true, 0 }, [PACKETS_BURSTABLE + 5] = { false, 0 } };
static __init unsigned int maximum_jiffies_at_index(int index) { - u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3; + unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; int i;
for (i = 0; i <= index; ++i) - total_nsecs += expected_results[i].nsec_to_sleep_before; - return nsecs_to_jiffies(total_nsecs); + total_msecs += expected_results[i].msec_to_sleep_before; + return msecs_to_jiffies(total_msecs); }
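With the selftest now driven by msleep(), the per-packet refill interval is MSEC_PER_SEC / PACKETS_PER_SECOND and the slack added by maximum_jiffies_at_index() is two thirds of one interval on top of the cumulative sleeps. A quick worked example, assuming a rate of 20 packets per second purely for illustration:

#include <stdio.h>

#define MSEC_PER_SEC        1000
#define PACKETS_PER_SECOND  20	/* illustrative value; must divide MSEC_PER_SEC */

int main(void)
{
	unsigned int interval = MSEC_PER_SEC / PACKETS_PER_SECOND;	/* 50 ms */
	unsigned int slack = 2 * interval / 3;				/* 33 ms */

	printf("one token every %u ms, timing slack %u ms\n", interval, slack);
	return 0;
}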
static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, @@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, loop_start_time = jiffies;
for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { - if (expected_results[i].nsec_to_sleep_before) { - ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3), - ns_to_ktime(expected_results[i].nsec_to_sleep_before)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME); - } + if (expected_results[i].msec_to_sleep_before) + msleep(expected_results[i].msec_to_sleep_before);
if (time_is_before_jiffies(loop_start_time + maximum_jiffies_at_index(i))) @@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void) if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) return true;
- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0); + BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
if (wg_ratelimiter_init()) goto out; @@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void) ++test; #endif
- for (trials = TRIALS_BEFORE_GIVING_UP;;) { + for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) { int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 1465a92ea3fc..b26617026e83 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -950,7 +950,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) offset %= 32;
 	val = mt76_rr(dev, addr);
-	val >>= (tid % 32);
+	val >>= offset;
if (offset > 20) { addr += 4; diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index dc78a523a69f..b6b938aa6615 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device) struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) { struct dasd_eckd_private *alias_priv, *private = base_device->private; - struct alias_pav_group *group = private->pavgroup; struct alias_lcu *lcu = private->lcu; struct dasd_device *alias_device; + struct alias_pav_group *group; unsigned long flags;
- if (!group || !lcu) + if (!lcu) return NULL; if (lcu->pav == NO_PAV || lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING)) @@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) }
spin_lock_irqsave(&lcu->lock, flags); + group = private->pavgroup; + if (!group) { + spin_unlock_irqrestore(&lcu->lock, flags); + return NULL; + } alias_device = group->next; if (!alias_device) { if (list_empty(&group->aliaslist)) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 3153f164554a..c1b76cda60db 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2822,23 +2822,22 @@ static int _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { struct sysinfo s; - int dma_mask;
if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma || - dma_get_required_mask(&pdev->dev) <= 32) - dma_mask = 32; + dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) + ioc->dma_mask = 32; /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) - dma_mask = 63; + ioc->dma_mask = 63; else - dma_mask = 64; + ioc->dma_mask = 64;
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask))) + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask))) return -ENODEV;
- if (dma_mask > 32) { + if (ioc->dma_mask > 32) { ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); } else { @@ -2848,7 +2847,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
si_meminfo(&s); ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", - dma_mask, convert_to_kb(s.totalram)); + ioc->dma_mask, convert_to_kb(s.totalram));
return 0; } @@ -4902,10 +4901,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) dma_pool_free(ioc->pcie_sgl_dma_pool, ioc->pcie_sg_lookup[i].pcie_sgl, ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->pcie_sg_lookup[i].pcie_sgl = NULL; } dma_pool_destroy(ioc->pcie_sgl_dma_pool); } - if (ioc->config_page) { dexitprintk(ioc, ioc_info(ioc, "config_page(0x%p): free\n", @@ -4960,6 +4959,89 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz) return 0; }
+/** + * _base_reduce_hba_queue_depth- Retry with reduced queue depth + * @ioc: Adapter object + * + * Return: 0 for success, non-zero for failure. + **/ +static inline int +_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc) +{ + int reduce_sz = 64; + + if ((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= reduce_sz; + return 0; + } else + return -ENOMEM; +} + +/** + * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory + * for pcie sgl pools. + * @ioc: Adapter object + * @sz: DMA Pool size + * @ct: Chain tracker + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + int i = 0, j = 0; + struct chain_tracker *ct; + + ioc->pcie_sgl_dma_pool = + dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, + ioc->page_size, 0); + if (!ioc->pcie_sgl_dma_pool) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n"); + return -ENOMEM; + } + + ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; + ioc->chains_per_prp_buffer = + min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->pcie_sg_lookup[i].pcie_sgl = + dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL, + &ioc->pcie_sg_lookup[i].pcie_sgl_dma); + if (!ioc->pcie_sg_lookup[i].pcie_sgl) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); + return -EAGAIN; + } + + if (!mpt3sas_check_same_4gb_region( + (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) { + ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n", + ioc->pcie_sg_lookup[i].pcie_sgl, + (unsigned long long) + ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + + for (j = 0; j < ioc->chains_per_prp_buffer; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + ct->chain_buffer = + ioc->pcie_sg_lookup[i].pcie_sgl + + (j * ioc->chain_segment_sz); + ct->chain_buffer_dma = + ioc->pcie_sg_lookup[i].pcie_sgl_dma + + (j * ioc->chain_segment_sz); + } + } + dinitprintk(ioc, ioc_info(ioc, + "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", + ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); + dinitprintk(ioc, ioc_info(ioc, + "Number of chains can fit in a PRP page(%d)\n", + ioc->chains_per_prp_buffer)); + return 0; +} + /** * base_alloc_rdpq_dma_pool - Allocating DMA'able memory * for reply queues. @@ -5058,7 +5140,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) unsigned short sg_tablesize; u16 sge_size; int i, j; - int ret = 0; + int ret = 0, rc = 0; struct chain_tracker *ct;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -5357,6 +5439,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) * be required for NVMe PRP's, only each set of NVMe blocks will be * contiguous, so a new set is allocated for each possible I/O. */ + ioc->chains_per_prp_buffer = 0; if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { nvme_blocks_needed = @@ -5371,43 +5454,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) goto out; } sz = nvme_blocks_needed * ioc->page_size; - ioc->pcie_sgl_dma_pool = - dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0); - if (!ioc->pcie_sgl_dma_pool) { - ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n"); - goto out; - } - - ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; - ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer, - ioc->chains_needed_per_io); - - for (i = 0; i < ioc->scsiio_depth; i++) { - ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc( - ioc->pcie_sgl_dma_pool, GFP_KERNEL, - &ioc->pcie_sg_lookup[i].pcie_sgl_dma); - if (!ioc->pcie_sg_lookup[i].pcie_sgl) { - ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); - goto out; - } - for (j = 0; j < ioc->chains_per_prp_buffer; j++) { - ct = &ioc->chain_lookup[i].chains_per_smid[j]; - ct->chain_buffer = - ioc->pcie_sg_lookup[i].pcie_sgl + - (j * ioc->chain_segment_sz); - ct->chain_buffer_dma = - ioc->pcie_sg_lookup[i].pcie_sgl_dma + - (j * ioc->chain_segment_sz); - } - } - - dinitprintk(ioc, - ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->scsiio_depth, sz, - (sz * ioc->scsiio_depth) / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n", - ioc->chains_per_prp_buffer)); + rc = _base_allocate_pcie_sgl_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; total_sz += sz * ioc->scsiio_depth; }
@@ -5577,6 +5628,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->shost->sg_tablesize); return 0;
+try_32bit_dma: + _base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + /* Change dma coherent mask to 32 bit and reallocate */ + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (_base_reduce_hba_queue_depth(ioc) != 0) + return -ENOMEM; + goto retry_allocation; + out: return -ENOMEM; } @@ -7239,6 +7303,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->rdpq_array_enable_assigned = 0; ioc->use_32bit_dma = false; + ioc->dma_mask = 64; if (ioc->is_aero_ioc) ioc->base_readl = &_base_readl_aero; else diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index bc8beb10f3fc..823bbe64a477 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1257,6 +1257,7 @@ struct MPT3SAS_ADAPTER { u16 thresh_hold; u8 high_iops_queues; u32 drv_support_bitmap; + u32 dma_mask; bool enable_sdev_max_qd; bool use_32bit_dma;
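mpt3sas_check_same_4gb_region(), called by the new PCIe SGL allocator above, only has to verify that a buffer does not straddle a 4 GB boundary; when it does, the driver drops to a 32-bit DMA mask and reallocates. The check itself is simple address arithmetic, sketched here in a simplified, stand-alone form rather than the driver's exact helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [addr, addr + size) stays within one 4 GB aligned window. */
static bool same_4gb_region(uint64_t addr, uint32_t size)
{
	return (addr >> 32) == ((addr + size - 1) >> 32);
}

int main(void)
{
	printf("%d\n", same_4gb_region(0xFFFFF000ULL, 0x8000));	/* 0: crosses 4 GB */
	printf("%d\n", same_4gb_region(0x100000000ULL, 0x8000));	/* 1: stays within */
	return 0;
}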
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 602065bfc9bb..b7872ad3e762 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -295,20 +295,16 @@ static int atmel_config_rs485(struct uart_port *port,
mode = atmel_uart_readl(port, ATMEL_US_MR);
- /* Resetting serial mode to RS232 (0x0) */ - mode &= ~ATMEL_US_USMODE; - - port->rs485 = *rs485conf; - if (rs485conf->flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); - if (port->rs485.flags & SER_RS485_RX_DURING_TX) + if (rs485conf->flags & SER_RS485_RX_DURING_TX) atmel_port->tx_done_mask = ATMEL_US_TXRDY; else atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
atmel_uart_writel(port, ATMEL_US_TTGR, rs485conf->delay_rts_after_send); + mode &= ~ATMEL_US_USMODE; mode |= ATMEL_US_USMODE_RS485; } else { dev_dbg(port->dev, "Setting UART to RS232\n"); diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index c2be22c3b7d1..cda71802b698 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -520,7 +520,7 @@ static void tegra_uart_tx_dma_complete(void *args) count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); spin_lock_irqsave(&tup->uport.lock, flags); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(&tup->uport, count); tup->tx_in_progress = 0; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&tup->uport); @@ -608,7 +608,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u) static void tegra_uart_stop_tx(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); - struct circ_buf *xmit = &tup->uport.state->xmit; struct dma_tx_state state; unsigned int count;
@@ -619,7 +618,7 @@ static void tegra_uart_stop_tx(struct uart_port *u) dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(&tup->uport, count); tup->tx_in_progress = 0; }
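uart_xmit_advance() wraps the open-coded tail update these hunks remove: advance the circular transmit buffer's tail by the number of bytes the DMA actually consumed, masking with the power-of-2 buffer size (the real helper also credits the bytes to the port's transmit counter). A minimal sketch of that tail arithmetic, with a stand-in ring structure:

#include <stdio.h>

#define XMIT_SIZE 4096	/* power of 2, like UART_XMIT_SIZE */

struct circ {			/* illustrative stand-in for struct circ_buf */
	unsigned int head;
	unsigned int tail;
	unsigned long tx_count;	/* total bytes handed to the hardware */
};

/* Consume 'count' bytes from the tail of a power-of-2 sized ring. */
static void xmit_advance(struct circ *c, unsigned int count)
{
	c->tail = (c->tail + count) & (XMIT_SIZE - 1);
	c->tx_count += count;
}

int main(void)
{
	struct circ c = { .head = 100, .tail = 4090, .tx_count = 0 };

	xmit_advance(&c, 10);	/* wraps: 4090 + 10 -> 4 */
	printf("tail=%u tx=%lu\n", c.tail, c.tx_count);
	return 0;
}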
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c index aaf8748a6147..31ae705aa38b 100644 --- a/drivers/tty/serial/tegra-tcu.c +++ b/drivers/tty/serial/tegra-tcu.c @@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port) break;
tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(port, count); }
uart_write_wakeup(port); diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index c6fc14b169da..e3a8b6c71aa1 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1531,7 +1531,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, TRB_LEN(le32_to_cpu(trb->length));
if (priv_req->num_of_trb > 1 && - le32_to_cpu(trb->control) & TRB_SMM) + le32_to_cpu(trb->control) & TRB_SMM && + le32_to_cpu(trb->control) & TRB_CHAIN) transfer_end = true;
cdns3_ep_inc_deq(priv_ep); @@ -1691,6 +1692,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) ep_cfg &= ~EP_CFG_ENABLE; writel(ep_cfg, &priv_dev->regs->ep_cfg); priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN; + priv_ep->flags |= EP_UPDATE_EP_TRBADDR; } cdns3_transfer_completed(priv_dev, priv_ep); } else if (!(priv_ep->flags & EP_STALLED) && diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 53b3d77fba6a..f2a3c0b5b535 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -5968,7 +5968,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) * * Return: The same as for usb_reset_and_verify_device(). * However, if a reset is already in progress (for instance, if a - * driver doesn't have pre_ or post_reset() callbacks, and while + * driver doesn't have pre_reset() or post_reset() callbacks, and while * being unbound or re-bound during the ongoing reset its disconnect() * or probe() routine tries to perform a second, nested reset), the * routine returns -EINPROGRESS. diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 5aae7504f78a..4a0eec176511 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -114,8 +114,6 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) dwc->current_dr_role = mode; }
-static int dwc3_core_soft_reset(struct dwc3 *dwc); - static void __dwc3_set_mode(struct work_struct *work) { struct dwc3 *dwc = work_to_dwc(work); @@ -265,7 +263,7 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) * dwc3_core_soft_reset - Issues core soft reset and PHY reset * @dwc: pointer to our context structure */ -static int dwc3_core_soft_reset(struct dwc3 *dwc) +int dwc3_core_soft_reset(struct dwc3 *dwc) { u32 reg; int retries = 1000; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 79e1b82e5e05..cbebe541f7e8 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1010,6 +1010,7 @@ struct dwc3_scratchpad_array { * @tx_max_burst_prd: max periodic ESS transmit burst size * @hsphy_interface: "utmi" or "ulpi" * @connected: true when we're connected to a host, false otherwise + * @softconnect: true when gadget connect is called, false when disconnect runs * @delayed_status: true when gadget driver asks for delayed status * @ep0_bounced: true when we used bounce buffer * @ep0_expect_in: true when we expect a DATA IN transfer @@ -1218,6 +1219,7 @@ struct dwc3 { const char *hsphy_interface;
unsigned connected:1; + unsigned softconnect:1; unsigned delayed_status:1; unsigned ep0_bounced:1; unsigned ep0_expect_in:1; @@ -1456,6 +1458,8 @@ bool dwc3_has_imod(struct dwc3 *dwc); int dwc3_event_buffers_setup(struct dwc3 *dwc); void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
+int dwc3_core_soft_reset(struct dwc3 *dwc); + #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_host_init(struct dwc3 *dwc); void dwc3_host_exit(struct dwc3 *dwc); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a2a10c05ef3f..41ed2f6f8a8d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2120,14 +2120,42 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc); static void __dwc3_gadget_stop(struct dwc3 *dwc); static int __dwc3_gadget_start(struct dwc3 *dwc);
+static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc) +{ + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + dwc->connected = false; + + /* + * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a + * Section 4.1.8 Table 4-7, it states that for a device-initiated + * disconnect, the SW needs to ensure that it sends "a DEPENDXFER + * command for any active transfers" before clearing the RunStop + * bit. + */ + dwc3_stop_active_transfers(dwc); + __dwc3_gadget_stop(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + + /* + * Note: if the GEVNTCOUNT indicates events in the event buffer, the + * driver needs to acknowledge them before the controller can halt. + * Simply let the interrupt handler acknowledges and handle the + * remaining event generated by the controller while polling for + * DSTS.DEVCTLHLT. + */ + return dwc3_gadget_run_stop(dwc, false, false); +} + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { struct dwc3 *dwc = gadget_to_dwc(g); - unsigned long flags; int ret;
is_on = !!is_on;
+ dwc->softconnect = is_on; /* * Per databook, when we want to stop the gadget, if a control transfer * is still in process, complete it and get the core into setup phase. @@ -2163,50 +2191,27 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) return 0; }
- /* - * Synchronize and disable any further event handling while controller - * is being enabled/disabled. - */ - disable_irq(dwc->irq_gadget); - - spin_lock_irqsave(&dwc->lock, flags); + if (dwc->pullups_connected == is_on) { + pm_runtime_put(dwc->dev); + return 0; + }
if (!is_on) { - u32 count; - - dwc->connected = false; + ret = dwc3_gadget_soft_disconnect(dwc); + } else { /* - * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a - * Section 4.1.8 Table 4-7, it states that for a device-initiated - * disconnect, the SW needs to ensure that it sends "a DEPENDXFER - * command for any active transfers" before clearing the RunStop - * bit. + * In the Synopsys DWC_usb31 1.90a programming guide section + * 4.1.9, it specifies that for a reconnect after a + * device-initiated disconnect requires a core soft reset + * (DCTL.CSftRst) before enabling the run/stop bit. */ - dwc3_stop_active_transfers(dwc); - __dwc3_gadget_stop(dwc); + dwc3_core_soft_reset(dwc);
- /* - * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a - * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the - * "software needs to acknowledge the events that are generated - * (by writing to GEVNTCOUNTn) while it is waiting for this bit - * to be set to '1'." - */ - count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); - count &= DWC3_GEVNTCOUNT_MASK; - if (count > 0) { - dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); - dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) % - dwc->ev_buf->length; - } - } else { + dwc3_event_buffers_setup(dwc); __dwc3_gadget_start(dwc); + ret = dwc3_gadget_run_stop(dwc, true, false); }
- ret = dwc3_gadget_run_stop(dwc, is_on, false); - spin_unlock_irqrestore(&dwc->lock, flags); - enable_irq(dwc->irq_gadget); - pm_runtime_put(dwc->dev);
return ret; @@ -4048,7 +4053,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc) { int ret;
- if (!dwc->gadget_driver) + if (!dwc->gadget_driver || !dwc->softconnect) return 0;
ret = __dwc3_gadget_start(dwc); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 8950d1f10a7f..86c4bc9df3b8 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -25,6 +25,13 @@ */ #define TT_MICROFRAMES_MAX 9
+/* schedule error type */ +#define ESCH_SS_Y6 1001 +#define ESCH_SS_OVERLAP 1002 +#define ESCH_CS_OVERFLOW 1003 +#define ESCH_BW_OVERFLOW 1004 +#define ESCH_FIXME 1005 + /* mtk scheduler bitmasks */ #define EP_BPKTS(p) ((p) & 0x7f) #define EP_BCSCOUNT(p) (((p) & 0x7) << 8) @@ -32,6 +39,24 @@ #define EP_BOFFSET(p) ((p) & 0x3fff) #define EP_BREPEAT(p) (((p) & 0x7fff) << 16)
+static char *sch_error_string(int err_num) +{ + switch (err_num) { + case ESCH_SS_Y6: + return "Can't schedule Start-Split in Y6"; + case ESCH_SS_OVERLAP: + return "Can't find a suitable Start-Split location"; + case ESCH_CS_OVERFLOW: + return "The last Complete-Split is greater than 7"; + case ESCH_BW_OVERFLOW: + return "Bandwidth exceeds the maximum limit"; + case ESCH_FIXME: + return "FIXME, to be resolved"; + default: + return "Unknown"; + } +} + static int is_fs_or_ls(enum usb_device_speed speed) { return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW; @@ -375,7 +400,6 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, sch_ep->bw_budget_table[j]; } } - sch_ep->allocated = used; }
static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) @@ -384,19 +408,20 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) u32 num_esit, tmp; int base; int i, j; + u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; + + if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP) + offset++; + for (i = 0; i < num_esit; i++) { base = offset + i * sch_ep->esit;
- /* - * Compared with hs bus, no matter what ep type, - * the hub will always delay one uframe to send data - */ - for (j = 0; j < sch_ep->cs_count; j++) { + for (j = 0; j < uframes; j++) { tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe; if (tmp > FS_PAYLOAD_MAX) - return -ERANGE; + return -ESCH_BW_OVERFLOW; } }
@@ -406,15 +431,11 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) static int check_sch_tt(struct usb_device *udev, struct mu3h_sch_ep_info *sch_ep, u32 offset) { - struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 extra_cs_count; - u32 fs_budget_start; u32 start_ss, last_ss; u32 start_cs, last_cs; - int i;
start_ss = offset % 8; - fs_budget_start = (start_ss + 1) % 8;
if (sch_ep->ep_type == ISOC_OUT_EP) { last_ss = start_ss + sch_ep->cs_count - 1; @@ -424,11 +445,7 @@ static int check_sch_tt(struct usb_device *udev, * must never schedule Start-Split in Y6 */ if (!(start_ss == 7 || last_ss < 6)) - return -ERANGE; - - for (i = 0; i < sch_ep->cs_count; i++) - if (test_bit(offset + i, tt->ss_bit_map)) - return -ERANGE; + return -ESCH_SS_Y6;
} else { u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); @@ -438,29 +455,24 @@ static int check_sch_tt(struct usb_device *udev, * must never schedule Start-Split in Y6 */ if (start_ss == 6) - return -ERANGE; + return -ESCH_SS_Y6;
/* one uframe for ss + one uframe for idle */ start_cs = (start_ss + 2) % 8; last_cs = start_cs + cs_count - 1;
if (last_cs > 7) - return -ERANGE; + return -ESCH_CS_OVERFLOW;
if (sch_ep->ep_type == ISOC_IN_EP) extra_cs_count = (last_cs == 7) ? 1 : 2; else /* ep_type : INTR IN / INTR OUT */ - extra_cs_count = (fs_budget_start == 6) ? 1 : 2; + extra_cs_count = 1;
cs_count += extra_cs_count; if (cs_count > 7) cs_count = 7; /* HW limit */
- for (i = 0; i < cs_count + 2; i++) { - if (test_bit(offset + i, tt->ss_bit_map)) - return -ERANGE; - } - sch_ep->cs_count = cs_count; /* one for ss, the other for idle */ sch_ep->num_budget_microframes = cs_count + 2; @@ -482,28 +494,24 @@ static void update_sch_tt(struct usb_device *udev, struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 base, num_esit; int bw_updated; - int bits; int i, j; + int offset = sch_ep->offset; + u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; - bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
if (used) bw_updated = sch_ep->bw_cost_per_microframe; else bw_updated = -sch_ep->bw_cost_per_microframe;
- for (i = 0; i < num_esit; i++) { - base = sch_ep->offset + i * sch_ep->esit; + if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP) + offset++;
- for (j = 0; j < bits; j++) { - if (used) - set_bit(base + j, tt->ss_bit_map); - else - clear_bit(base + j, tt->ss_bit_map); - } + for (i = 0; i < num_esit; i++) { + base = offset + i * sch_ep->esit;
- for (j = 0; j < sch_ep->cs_count; j++) + for (j = 0; j < uframes; j++) tt->fs_bus_bw[base + j] += bw_updated; }
@@ -513,21 +521,48 @@ static void update_sch_tt(struct usb_device *udev, list_del(&sch_ep->tt_endpoint); }
+static int load_ep_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, + struct mu3h_sch_ep_info *sch_ep, bool loaded) +{ + if (sch_ep->sch_tt) + update_sch_tt(udev, sch_ep, loaded); + + /* update bus bandwidth info */ + update_bus_bw(sch_bw, sch_ep, loaded); + sch_ep->allocated = loaded; + + return 0; +} + +static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep) +{ + u32 boundary = sch_ep->esit; + + if (sch_ep->sch_tt) { /* LS/FS with TT */ + /* + * tune for CS, normally esit >= 8 for FS/LS, + * not add one for other types to avoid access array + * out of boundary + */ + if (sch_ep->ep_type == ISOC_OUT_EP && boundary > 1) + boundary--; + } + + return boundary; +} + static int check_sch_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) { u32 offset; - u32 esit; u32 min_bw; u32 min_index; u32 worst_bw; u32 bw_boundary; + u32 esit_boundary; u32 min_num_budget; u32 min_cs_count; - bool tt_offset_ok = false; - int ret; - - esit = sch_ep->esit; + int ret = 0;
/* * Search through all possible schedule microframes. @@ -537,16 +572,15 @@ static int check_sch_bw(struct usb_device *udev, min_index = 0; min_cs_count = sch_ep->cs_count; min_num_budget = sch_ep->num_budget_microframes; - for (offset = 0; offset < esit; offset++) { - if (is_fs_or_ls(udev->speed)) { + esit_boundary = get_esit_boundary(sch_ep); + for (offset = 0; offset < sch_ep->esit; offset++) { + if (sch_ep->sch_tt) { ret = check_sch_tt(udev, sch_ep, offset); if (ret) continue; - else - tt_offset_ok = true; }
- if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit) + if ((offset + sch_ep->num_budget_microframes) > esit_boundary) break;
worst_bw = get_max_bw(sch_bw, sch_ep, offset); @@ -569,35 +603,21 @@ static int check_sch_bw(struct usb_device *udev,
/* check bandwidth */ if (min_bw > bw_boundary) - return -ERANGE; + return ret ? ret : -ESCH_BW_OVERFLOW;
sch_ep->offset = min_index; sch_ep->cs_count = min_cs_count; sch_ep->num_budget_microframes = min_num_budget;
- if (is_fs_or_ls(udev->speed)) { - /* all offset for tt is not ok*/ - if (!tt_offset_ok) - return -ERANGE; - - update_sch_tt(udev, sch_ep, 1); - } - - /* update bus bandwidth info */ - update_bus_bw(sch_bw, sch_ep, 1); - - return 0; + return load_ep_bw(udev, sch_bw, sch_ep, true); }
static void destroy_sch_ep(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) { /* only release ep bw check passed by check_sch_bw() */ - if (sch_ep->allocated) { - update_bus_bw(sch_bw, sch_ep, 0); - if (sch_ep->sch_tt) - update_sch_tt(udev, sch_ep, 0); - } + if (sch_ep->allocated) + load_ep_bw(udev, sch_bw, sch_ep, false);
if (sch_ep->sch_tt) drop_tt(udev); @@ -760,7 +780,8 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
ret = check_sch_bw(udev, sch_bw, sch_ep); if (ret) { - xhci_err(xhci, "Not enough bandwidth!\n"); + xhci_err(xhci, "Not enough bandwidth! (%s)\n", + sch_error_string(-ret)); return -ENOSPC; } } diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 2fc0568ba054..3e2c607b5d64 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -20,14 +20,12 @@ #define XHCI_MTK_MAX_ESIT 64
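The xhci-mtk scheduler rework above stops returning a bare -ERANGE for every scheduling failure and instead uses a driver-private error space (the ESCH_* values) that xhci_mtk_check_bandwidth() can translate into a readable reason with sch_error_string(-ret). Below is a minimal standalone sketch of that pattern; the ESCH_* values and message strings are taken from the patch, while demo_check_bandwidth(), its numbers and main() are invented for illustration only.

#include <stdio.h>

/* private scheduler error space, mirroring the patch */
#define ESCH_SS_Y6       1001
#define ESCH_SS_OVERLAP  1002
#define ESCH_CS_OVERFLOW 1003
#define ESCH_BW_OVERFLOW 1004
#define ESCH_FIXME       1005

static const char *sch_error_string(int err_num)
{
        switch (err_num) {
        case ESCH_SS_Y6:
                return "Can't schedule Start-Split in Y6";
        case ESCH_SS_OVERLAP:
                return "Can't find a suitable Start-Split location";
        case ESCH_CS_OVERFLOW:
                return "The last Complete-Split is greater than 7";
        case ESCH_BW_OVERFLOW:
                return "Bandwidth exceeds the maximum limit";
        case ESCH_FIXME:
                return "FIXME, to be resolved";
        default:
                return "Unknown";
        }
}

/* hypothetical check that fails the way check_sch_bw() might */
static int demo_check_bandwidth(int demand, int limit)
{
        return demand > limit ? -ESCH_BW_OVERFLOW : 0;
}

int main(void)
{
        int ret = demo_check_bandwidth(1200, 1157);

        if (ret)
                /* negate the code before the lookup, as the patch does */
                printf("Not enough bandwidth! (%s)\n", sch_error_string(-ret));
        return 0;
}

As in the patch, the caller still reports a plain -ENOSPC upward; the private codes only improve the log message.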
/** - * @ss_bit_map: used to avoid start split microframes overlay * @fs_bus_bw: array to keep track of bandwidth already used for FS * @ep_list: Endpoints using this TT * @usb_tt: usb TT related * @tt_port: TT port number */ struct mu3h_sch_tt { - DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT); u32 fs_bus_bw[XHCI_MTK_MAX_ESIT]; struct list_head ep_list; struct usb_tt *usb_tt; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 211e03a20407..eea3dd18a044 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EM060K 0x030b #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 +#define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200T 0x6026 #define QUECTEL_PRODUCT_RM500K 0x7001 @@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */ + .driver_info = ZLP }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), @@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index acdef6fbb85e..80daa70e288b 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -83,8 +83,6 @@ enum { /* * Input Output Manager (IOM) PORT STATUS */ -#define IOM_PORT_STATUS_OFFSET 0x560 - #define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6) #define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6 #define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03 @@ -144,6 +142,7 @@ struct pmc_usb { struct pmc_usb_port *port; struct acpi_device *iom_adev; void __iomem *iom_base; + u32 iom_port_status_offset; };
static void update_port_status(struct pmc_usb_port *port) @@ -153,7 +152,8 @@ static void update_port_status(struct pmc_usb_port *port) /* SoC expects the USB Type-C port numbers to start with 0 */ port_num = port->usb3_port - 1;
- port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET + + port->iom_status = readl(port->pmc->iom_base + + port->pmc->iom_port_status_offset + port_num * sizeof(u32)); }
@@ -541,19 +541,42 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
static int is_memory(struct acpi_resource *res, void *data) { - struct resource r; + struct resource_win win = {}; + struct resource *r = &win.res;
- return !acpi_dev_resource_memory(res, &r); + return !(acpi_dev_resource_memory(res, r) || + acpi_dev_resource_address_space(res, &win)); }
+/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */ +static const struct acpi_device_id iom_acpi_ids[] = { + /* TigerLake */ + { "INTC1072", 0x560, }, + + /* AlderLake */ + { "INTC1079", 0x160, }, + + /* Meteor Lake */ + { "INTC107A", 0x160, }, + {} +}; + static int pmc_usb_probe_iom(struct pmc_usb *pmc) { struct list_head resource_list; struct resource_entry *rentry; - struct acpi_device *adev; + static const struct acpi_device_id *dev_id; + struct acpi_device *adev = NULL; int ret;
- adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1); + for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) { + if (acpi_dev_present(dev_id->id, NULL, -1)) { + pmc->iom_port_status_offset = (u32)dev_id->driver_data; + adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1); + break; + } + } + if (!adev) return -ENODEV;
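The intel_pmc_mux probe above no longer assumes the Tiger Lake IOM register layout: the port-status offset now rides along as driver_data in a small ACPI ID table (0x560 for INTC1072, 0x160 for INTC1079 and INTC107A). Below is a hedged userspace sketch of the same table-driven lookup; the HIDs and offsets come from the patch, while struct iom_id, fake_acpi_dev_present() and main() are invented stand-ins for the ACPI helpers.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* ACPI HIDs and IOM port-status offsets, as listed in the patch */
struct iom_id {
        const char *hid;
        uint32_t    port_status_offset;
};

static const struct iom_id iom_acpi_ids[] = {
        { "INTC1072", 0x560 },  /* TigerLake */
        { "INTC1079", 0x160 },  /* AlderLake */
        { "INTC107A", 0x160 },  /* Meteor Lake */
        { NULL, 0 }
};

/* hypothetical stand-in for acpi_dev_present(): pretend one HID exists */
static int fake_acpi_dev_present(const char *hid)
{
        return strcmp(hid, "INTC1079") == 0;
}

int main(void)
{
        const struct iom_id *id;
        uint32_t offset = 0;

        /* walk the table until a present device is found, like the probe loop */
        for (id = iom_acpi_ids; id->hid; id++) {
                if (fake_acpi_dev_present(id->hid)) {
                        offset = id->port_status_offset;
                        break;
                }
        }

        if (!id->hid)
                return 1;       /* the driver returns -ENODEV here */

        printf("matched %s, IOM port status at 0x%x\n", id->hid, offset);
        return 0;
}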
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index fbd438e9b9b0..ce50ca9a320c 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -98,6 +98,12 @@ struct vfio_dma { unsigned long *bitmap; };
+struct vfio_batch { + struct page **pages; /* for pin_user_pages_remote */ + struct page *fallback_page; /* if pages alloc fails */ + int capacity; /* length of pages array */ +}; + struct vfio_group { struct iommu_group *iommu_group; struct list_head next; @@ -428,6 +434,31 @@ static int put_pfn(unsigned long pfn, int prot) return 0; }
+#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) + +static void vfio_batch_init(struct vfio_batch *batch) +{ + if (unlikely(disable_hugepages)) + goto fallback; + + batch->pages = (struct page **) __get_free_page(GFP_KERNEL); + if (!batch->pages) + goto fallback; + + batch->capacity = VFIO_BATCH_MAX_CAPACITY; + return; + +fallback: + batch->pages = &batch->fallback_page; + batch->capacity = 1; +} + +static void vfio_batch_fini(struct vfio_batch *batch) +{ + if (batch->capacity == VFIO_BATCH_MAX_CAPACITY) + free_page((unsigned long)batch->pages); +} + static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, unsigned long vaddr, unsigned long *pfn, bool write_fault) @@ -464,10 +495,14 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, return ret; }
-static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, - int prot, unsigned long *pfn) +/* + * Returns the positive number of pfns successfully obtained or a negative + * error code. + */ +static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, + long npages, int prot, unsigned long *pfn, + struct page **pages) { - struct page *page[1]; struct vm_area_struct *vma; unsigned int flags = 0; int ret; @@ -476,11 +511,22 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, flags |= FOLL_WRITE;
mmap_read_lock(mm); - ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, - page, NULL, NULL); - if (ret == 1) { - *pfn = page_to_pfn(page[0]); - ret = 0; + ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, + pages, NULL, NULL); + if (ret > 0) { + int i; + + /* + * The zero page is always resident, we don't need to pin it + * and it falls into our invalid/reserved test so we don't + * unpin in put_pfn(). Unpin all zero pages in the batch here. + */ + for (i = 0 ; i < ret; i++) { + if (unlikely(is_zero_pfn(page_to_pfn(pages[i])))) + unpin_user_page(pages[i]); + } + + *pfn = page_to_pfn(pages[0]); goto done; }
@@ -494,8 +540,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, if (ret == -EAGAIN) goto retry;
- if (!ret && !is_invalid_reserved_pfn(*pfn)) - ret = -EFAULT; + if (!ret) { + if (is_invalid_reserved_pfn(*pfn)) + ret = 1; + else + ret = -EFAULT; + } } done: mmap_read_unlock(mm); @@ -509,7 +559,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, */ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, long npage, unsigned long *pfn_base, - unsigned long limit) + unsigned long limit, struct vfio_batch *batch) { unsigned long pfn = 0; long ret, pinned = 0, lock_acct = 0; @@ -520,8 +570,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, if (!current->mm) return -ENODEV;
- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base); - if (ret) + ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, pfn_base, + batch->pages); + if (ret < 0) return ret;
pinned++; @@ -547,8 +598,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, /* Lock all the consecutive pages from pfn_base */ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); - if (ret) + ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, &pfn, + batch->pages); + if (ret < 0) break;
if (pfn != *pfn_base + pinned || @@ -574,7 +626,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, ret = vfio_lock_acct(dma, lock_acct, false);
unpin_out: - if (ret) { + if (ret < 0) { if (!rsvd) { for (pfn = *pfn_base ; pinned ; pfn++, pinned--) put_pfn(pfn, dma->prot); @@ -610,6 +662,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, unsigned long *pfn_base, bool do_accounting) { + struct page *pages[1]; struct mm_struct *mm; int ret;
@@ -617,8 +670,13 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, if (!mm) return -ENODEV;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); - if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { + ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); + if (ret != 1) + goto out; + + ret = 0; + + if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { ret = vfio_lock_acct(dma, 1, true); if (ret) { put_pfn(*pfn_base, dma->prot); @@ -630,6 +688,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, } }
+out: mmput(mm); return ret; } @@ -1263,15 +1322,19 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, { dma_addr_t iova = dma->iova; unsigned long vaddr = dma->vaddr; + struct vfio_batch batch; size_t size = map_size; long npage; unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret = 0;
+ vfio_batch_init(&batch); + while (size) { /* Pin a contiguous chunk of memory */ npage = vfio_pin_pages_remote(dma, vaddr + dma->size, - size >> PAGE_SHIFT, &pfn, limit); + size >> PAGE_SHIFT, &pfn, limit, + &batch); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1291,6 +1354,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, dma->size += npage << PAGE_SHIFT; }
+ vfio_batch_fini(&batch); dma->iommu_mapped = true;
if (ret) @@ -1449,6 +1513,7 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { + struct vfio_batch batch; struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; @@ -1459,6 +1524,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+ vfio_batch_init(&batch); + n = rb_first(&iommu->dma_list);
for (; n; n = rb_next(n)) { @@ -1506,7 +1573,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
npage = vfio_pin_pages_remote(dma, vaddr, n >> PAGE_SHIFT, - &pfn, limit); + &pfn, limit, + &batch); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1539,6 +1607,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, dma->iommu_mapped = true; }
+ vfio_batch_fini(&batch); return 0;
unwind: @@ -1579,6 +1648,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, } }
+ vfio_batch_fini(&batch); return ret; }
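The vfio_iommu_type1 changes above introduce a small batch object so the pinning paths can hand pin_user_pages_remote() a full page worth of struct page pointers at a time, falling back to a single embedded slot when the page allocation fails or hugepages are disabled. The sketch below models only that capacity/fallback bookkeeping in plain C, with malloc() standing in for __get_free_page(); struct demo_batch and DEMO_CAPACITY are invented names.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
/* mirrors VFIO_BATCH_MAX_CAPACITY: how many pointers fit in one page */
#define DEMO_CAPACITY (PAGE_SIZE / sizeof(void *))

struct demo_batch {
        void **pages;           /* array handed to the pinning call */
        void  *fallback_page;   /* single slot used if the alloc fails */
        int    capacity;
};

static void demo_batch_init(struct demo_batch *batch)
{
        batch->pages = malloc(PAGE_SIZE);       /* stand-in for __get_free_page() */
        if (batch->pages) {
                batch->capacity = DEMO_CAPACITY;
                return;
        }
        /* degrade gracefully to the old one-page-at-a-time behaviour */
        batch->pages = &batch->fallback_page;
        batch->capacity = 1;
}

static void demo_batch_fini(struct demo_batch *batch)
{
        /* only free what demo_batch_init() actually allocated */
        if (batch->capacity == DEMO_CAPACITY)
                free(batch->pages);
}

int main(void)
{
        struct demo_batch batch;

        demo_batch_init(&batch);
        printf("batch capacity: %d pages per pin call\n", batch.capacity);
        demo_batch_fini(&batch);
        return 0;
}

The symmetry matters: the fini path must only free the page when init actually took the large-capacity path, which is why capacity doubles as the ownership flag, the same trick vfio_batch_fini() uses.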
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 24c6f36177ba..a6ca4eda9a5a 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -230,6 +230,8 @@ extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace); extern void dequeue_mid(struct mid_q_entry *mid, bool malformed); extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read); +extern ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server, + size_t to_read); extern int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int page_offset, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 0496934feecb..c279527aae92 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1451,9 +1451,9 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server) while (remaining > 0) { int length;
- length = cifs_read_from_socket(server, server->bigbuf, - min_t(unsigned int, remaining, - CIFSMaxBufSize + MAX_HEADER_SIZE(server))); + length = cifs_discard_from_socket(server, + min_t(size_t, remaining, + CIFSMaxBufSize + MAX_HEADER_SIZE(server))); if (length < 0) return length; server->total_read += length; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 7f5d173760cf..d1c3086d7ddd 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -695,9 +695,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) int length = 0; int total_read;
- smb_msg->msg_control = NULL; - smb_msg->msg_controllen = 0; - for (total_read = 0; msg_data_left(smb_msg); total_read += length) { try_to_freeze();
@@ -748,18 +745,33 @@ int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; struct kvec iov = {.iov_base = buf, .iov_len = to_read}; iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
return cifs_readv_from_socket(server, &smb_msg); }
+ssize_t +cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read) +{ + struct msghdr smb_msg = {}; + + /* + * iov_iter_discard already sets smb_msg.type and count and iov_offset + * and cifs_readv_from_socket sets msg_control and msg_controllen + * so little to initialize in struct msghdr + */ + iov_iter_discard(&smb_msg.msg_iter, READ, to_read); + + return cifs_readv_from_socket(server, &smb_msg); +} + int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int page_offset, unsigned int to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; struct bio_vec bv = { .bv_page = page, .bv_len = to_read, .bv_offset = page_offset}; iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read); diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 383ae8744c33..b137006f0fd2 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -209,10 +209,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
*sent = 0;
- smb_msg->msg_name = NULL; - smb_msg->msg_namelen = 0; - smb_msg->msg_control = NULL; - smb_msg->msg_controllen = 0; if (server->noblocksnd) smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; else @@ -324,7 +320,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, sigset_t mask, oldmask; size_t total_len = 0, sent, size; struct socket *ssocket = server->ssocket; - struct msghdr smb_msg; + struct msghdr smb_msg = {}; __be32 rfc1002_marker;
if (cifs_rdma_enabled(server)) { diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0f49bf547b84..30add5a3df3d 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -459,6 +459,10 @@ static int __ext4_ext_check(const char *function, unsigned int line, error_msg = "invalid eh_entries"; goto corrupted; } + if (unlikely((eh->eh_entries == 0) && (depth > 0))) { + error_msg = "eh_entries is 0 but eh_depth is > 0"; + goto corrupted; + } if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) { error_msg = "invalid extent entries"; goto corrupted; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 875af329c43e..c53c9b132204 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -508,7 +508,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, goto fallback; }
- max_dirs = ndirs / ngroups + inodes_per_group / 16; + max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16; min_inodes = avefreei - inodes_per_group*flex_size / 4; if (min_inodes < 1) min_inodes = 1; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c32d0895c3a3..d5ca02a7766e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4959,6 +4959,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, ext4_fsblk_t block = 0; unsigned int inquota = 0; unsigned int reserv_clstrs = 0; + int retries = 0; u64 seq;
might_sleep(); @@ -5061,7 +5062,8 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, ar->len = ac->ac_b_ex.fe_len; } } else { - if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) + if (++retries < 3 && + ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) goto repeat; /* * If block allocation fails then the pa allocated above diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index c667c63f2cb0..fa8aefe6b7ec 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -358,19 +358,36 @@ xfs_dinode_verify_fork( int whichfork) { uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork); + mode_t mode = be16_to_cpu(dip->di_mode); + uint32_t fork_size = XFS_DFORK_SIZE(dip, mp, whichfork); + uint32_t fork_format = XFS_DFORK_FORMAT(dip, whichfork);
- switch (XFS_DFORK_FORMAT(dip, whichfork)) { + /* + * For fork types that can contain local data, check that the fork + * format matches the size of local data contained within the fork. + * + * For all types, check that when the size says the should be in extent + * or btree format, the inode isn't claiming it is in local format. + */ + if (whichfork == XFS_DATA_FORK) { + if (S_ISDIR(mode) || S_ISLNK(mode)) { + if (be64_to_cpu(dip->di_size) <= fork_size && + fork_format != XFS_DINODE_FMT_LOCAL) + return __this_address; + } + + if (be64_to_cpu(dip->di_size) > fork_size && + fork_format == XFS_DINODE_FMT_LOCAL) + return __this_address; + } + + switch (fork_format) { case XFS_DINODE_FMT_LOCAL: /* - * no local regular files yet + * No local regular files yet. */ - if (whichfork == XFS_DATA_FORK) { - if (S_ISREG(be16_to_cpu(dip->di_mode))) - return __this_address; - if (be64_to_cpu(dip->di_size) > - XFS_DFORK_SIZE(dip, mp, whichfork)) - return __this_address; - } + if (S_ISREG(mode) && whichfork == XFS_DATA_FORK) + return __this_address; if (di_nextents) return __this_address; break; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1f61e085676b..19008838df76 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -802,6 +802,7 @@ xfs_ialloc( xfs_buf_t **ialloc_context, xfs_inode_t **ipp) { + struct inode *dir = pip ? VFS_I(pip) : NULL; struct xfs_mount *mp = tp->t_mountp; xfs_ino_t ino; xfs_inode_t *ip; @@ -847,18 +848,17 @@ xfs_ialloc( return error; ASSERT(ip != NULL); inode = VFS_I(ip); - inode->i_mode = mode; set_nlink(inode, nlink); - inode->i_uid = current_fsuid(); inode->i_rdev = rdev; ip->i_d.di_projid = prid;
- if (pip && XFS_INHERIT_GID(pip)) { - inode->i_gid = VFS_I(pip)->i_gid; - if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode)) - inode->i_mode |= S_ISGID; + if (dir && !(dir->i_mode & S_ISGID) && + (mp->m_flags & XFS_MOUNT_GRPID)) { + inode->i_uid = current_fsuid(); + inode->i_gid = dir->i_gid; + inode->i_mode = mode; } else { - inode->i_gid = current_fsgid(); + inode_init_owner(inode, dir, mode); }
/* @@ -2669,14 +2669,13 @@ xfs_ifree_cluster( }
/* - * This is called to return an inode to the inode free list. - * The inode should already be truncated to 0 length and have - * no pages associated with it. This routine also assumes that - * the inode is already a part of the transaction. + * This is called to return an inode to the inode free list. The inode should + * already be truncated to 0 length and have no pages associated with it. This + * routine also assumes that the inode is already a part of the transaction. * - * The on-disk copy of the inode will have been added to the list - * of unlinked inodes in the AGI. We need to remove the inode from - * that list atomically with respect to freeing it here. + * The on-disk copy of the inode will have been added to the list of unlinked + * inodes in the AGI. We need to remove the inode from that list atomically with + * respect to freeing it here. */ int xfs_ifree( @@ -2694,13 +2693,16 @@ xfs_ifree( ASSERT(ip->i_d.di_nblocks == 0);
/* - * Pull the on-disk inode from the AGI unlinked list. + * Free the inode first so that we guarantee that the AGI lock is going + * to be taken before we remove the inode from the unlinked list. This + * makes the AGI lock -> unlinked list modification order the same as + * used in O_TMPFILE creation. */ - error = xfs_iunlink_remove(tp, ip); + error = xfs_difree(tp, ip->i_ino, &xic); if (error) return error;
- error = xfs_difree(tp, ip->i_ino, &xic); + error = xfs_iunlink_remove(tp, ip); if (error) return error;
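Earlier in the xfs_inode.c diff, xfs_ialloc() stops open-coding owner setup and defers to inode_init_owner(), keeping a special case only for "grpid" mounts on a non-setgid parent directory, where the new inode inherits the directory's group outright. The snippet below is a deliberately simplified model of those two group-inheritance policies, not the kernel code; struct demo_dir and new_inode_gid() are invented for the illustration.

#include <stdio.h>
#include <stdbool.h>

struct demo_dir {
        unsigned gid;
        bool     sgid;          /* directory has S_ISGID set */
};

/*
 * Simplified model of group inheritance for a new inode:
 *  - "grpid" mounts (BSD semantics): always take the parent directory's gid;
 *  - otherwise (System V semantics, what inode_init_owner() implements):
 *    take the caller's fsgid unless the parent directory is setgid.
 */
static unsigned new_inode_gid(const struct demo_dir *dir, bool grpid,
                              unsigned current_fsgid)
{
        if (dir && (grpid || dir->sgid))
                return dir->gid;
        return current_fsgid;
}

int main(void)
{
        struct demo_dir dir = { .gid = 1000, .sgid = false };

        printf("grpid mount:   gid %u\n", new_inode_gid(&dir, true, 2000));
        printf("default mount: gid %u\n", new_inode_gid(&dir, false, 2000));
        dir.sgid = true;
        printf("setgid dir:    gid %u\n", new_inode_gid(&dir, false, 2000));
        return 0;
}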
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index b68fca08be27..3088d94684c1 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); +#ifdef CONFIG_INET +int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size); +#else +static inline int inet_gifconf(struct net_device *dev, char __user *buf, + int len, int size) +{ + return 0; +} +#endif void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 94871f12e536..896e563e2c18 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1489,6 +1489,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end);
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); + #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); #else diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6564fb4ac49e..ef75567efd27 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3201,14 +3201,6 @@ static inline bool dev_has_header(const struct net_device *dev) return dev->header_ops && dev->header_ops->create; }
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, - int len, int size); -int register_gifconf(unsigned int family, gifconf_func_t *gifconf); -static inline int unregister_gifconf(unsigned int family) -{ - return register_gifconf(family, NULL); -} - #ifdef CONFIG_NET_FLOW_LIMIT #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 9c1292ea47fd..59a8caf3230a 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -300,6 +300,23 @@ struct uart_state { /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256
+/** + * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars + * @up: uart_port structure describing the port + * @chars: number of characters sent + * + * This function advances the tail of circular xmit buffer by the number of + * @chars transmitted and handles accounting of transmitted bytes (into + * @up's icount.tx). + */ +static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars) +{ + struct circ_buf *xmit = &up->state->xmit; + + xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1); + up->icount.tx += chars; +} + struct module; struct tty_driver;
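The uart_xmit_advance() helper added above folds the two steps every driver was repeating into one place: wrap the circular-buffer tail (UART_XMIT_SIZE is a power of two, so the wrap is a simple mask) and credit the transmitted characters to icount.tx, which the DMA completion paths converted earlier in this diff were not doing. Here is a self-contained sketch of the same arithmetic with simplified stand-in types; demo_port and XMIT_SIZE are invented and are not the serial core structures.

#include <stdio.h>

#define XMIT_SIZE 4096U         /* power of two, like UART_XMIT_SIZE */

struct demo_port {
        unsigned int tail;      /* consumer index into the circular buffer */
        unsigned long tx_count; /* icount.tx equivalent */
};

/* advance the tail by 'chars' sent characters and account for them */
static void demo_xmit_advance(struct demo_port *up, unsigned int chars)
{
        up->tail = (up->tail + chars) & (XMIT_SIZE - 1);
        up->tx_count += chars;
}

int main(void)
{
        struct demo_port port = { .tail = 4090, .tx_count = 0 };

        demo_xmit_advance(&port, 10);   /* wraps past the end of the buffer */
        printf("tail=%u tx=%lu\n", port.tail, port.tx_count);  /* tail=4, tx=10 */
        return 0;
}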
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index 1a28f299a4c6..895eae18271f 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -15,8 +15,6 @@ #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) #define AD_TIMER_INTERVAL 100 /*msec*/
-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} - #define AD_LACP_SLOW 0 #define AD_LACP_FAST 1
diff --git a/include/net/bonding.h b/include/net/bonding.h index 67d676059aa0..d9cc3f5602fb 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -763,6 +763,9 @@ extern struct rtnl_link_ops bond_link_ops; /* exported from bond_sysfs_slave.c */ extern const struct sysfs_ops slave_sysfs_ops;
+/* exported from bond_3ad.c */ +extern const u8 lacpdu_mcast_addr[]; + static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb) { atomic_long_inc(&dev->tx_dropped); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index fdf5fa4bf444..0cc2a62e88f9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3047,10 +3047,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) if (WARN_ON(!work->func)) return false;
- if (!from_cancel) { - lock_map_acquire(&work->lockdep_map); - lock_map_release(&work->lockdep_map); - } + lock_map_acquire(&work->lockdep_map); + lock_map_release(&work->lockdep_map);
if (start_flush_work(work, &barr, from_cancel)) { wait_for_completion(&barr.done); diff --git a/mm/slub.c b/mm/slub.c index b395ef064544..b0f637519ac9 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5559,7 +5559,8 @@ static char *create_unique_id(struct kmem_cache *s) char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); char *p = name;
- BUG_ON(!name); + if (!name) + return ERR_PTR(-ENOMEM);
*p++ = ':'; /* @@ -5617,6 +5618,8 @@ static int sysfs_slab_add(struct kmem_cache *s) * for the symlinks. */ name = create_unique_id(s); + if (IS_ERR(name)) + return PTR_ERR(name); }
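The slub change above turns an allocation failure in create_unique_id() from a BUG_ON() crash into an ERR_PTR(-ENOMEM) return that sysfs_slab_add() checks with IS_ERR()/PTR_ERR(). Those macros encode a small negative errno in the pointer value itself. The sketch below re-implements the idea in miniature for userspace, since the kernel macros are not available there; demo_err_ptr(), demo_is_err(), demo_ptr_err() and make_id() are invented names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define MAX_ERRNO 4095UL

/* miniature re-implementation of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
static inline void *demo_err_ptr(long err)      { return (void *)err; }
static inline int demo_is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static inline long demo_ptr_err(const void *p)  { return (long)p; }

/* stand-in for create_unique_id(): may fail with -ENOMEM */
static char *make_id(const char *base)
{
        char *name = malloc(64);

        if (!name)
                return demo_err_ptr(-ENOMEM);   /* error travels in the pointer */
        snprintf(name, 64, ":%s", base);
        return name;
}

int main(void)
{
        char *name = make_id("kmalloc-64");

        if (demo_is_err(name)) {
                fprintf(stderr, "failed: %ld\n", demo_ptr_err(name));
                return 1;
        }
        puts(name);
        free(name);
        return 0;
}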
s->kobj.kset = kset; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 310740cc684a..06b80b584381 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -999,8 +999,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, goto free_iterate; }
- if (repl->valid_hooks != t->valid_hooks) + if (repl->valid_hooks != t->valid_hooks) { + ret = -EINVAL; goto free_unlock; + }
if (repl->num_counters && repl->num_counters != t->private->nentries) { ret = -EINVAL; diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 54fb18b4f55e..993420da2930 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/kmod.h> #include <linux/netdevice.h> +#include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/net_tstamp.h> @@ -25,26 +26,6 @@ static int dev_ifname(struct net *net, struct ifreq *ifr) return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex); }
-static gifconf_func_t *gifconf_list[NPROTO]; - -/** - * register_gifconf - register a SIOCGIF handler - * @family: Address family - * @gifconf: Function handler - * - * Register protocol dependent address dumping routines. The handler - * that is passed must not be freed or reused until it has been replaced - * by another handler. - */ -int register_gifconf(unsigned int family, gifconf_func_t *gifconf) -{ - if (family >= NPROTO) - return -EINVAL; - gifconf_list[family] = gifconf; - return 0; -} -EXPORT_SYMBOL(register_gifconf); - /* * Perform a SIOCGIFCONF call. This structure will change * size eventually, and there is nothing I can do about it. @@ -57,7 +38,6 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size) char __user *pos; int len; int total; - int i;
/* * Fetch the caller's info block. @@ -72,19 +52,15 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size)
total = 0; for_each_netdev(net, dev) { - for (i = 0; i < NPROTO; i++) { - if (gifconf_list[i]) { - int done; - if (!pos) - done = gifconf_list[i](dev, NULL, 0, size); - else - done = gifconf_list[i](dev, pos + total, - len - total, size); - if (done < 0) - return -EFAULT; - total += done; - } - } + int done; + if (!pos) + done = inet_gifconf(dev, NULL, 0, size); + else + done = inet_gifconf(dev, pos + total, + len - total, size); + if (done < 0) + return -EFAULT; + total += done; }
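With the per-family gifconf handler array removed, dev_ifconf() above now calls inet_gifconf() directly for each device; AF_INET was the only family ever registered, so the user-visible SIOCGIFCONF behaviour (listing interfaces that have an IPv4 address) should be unchanged. For reference, a minimal userspace consumer of that ioctl looks roughly like the sketch below; the fixed 32-entry buffer is an arbitrary choice for the demo.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
        struct ifconf ifc;
        struct ifreq ifr[32];
        int fd, i, n;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
                return 1;

        memset(&ifc, 0, sizeof(ifc));
        ifc.ifc_len = sizeof(ifr);
        ifc.ifc_req = ifr;

        /* this request is what ends up in dev_ifconf()/inet_gifconf() */
        if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
                close(fd);
                return 1;
        }

        n = ifc.ifc_len / sizeof(struct ifreq);
        for (i = 0; i < n; i++)
                printf("%s\n", ifr[i].ifr_name);

        close(fd);
        return 0;
}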
/* diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index f9baa9b1c77f..ed120828c7e2 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1485,7 +1485,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow) } EXPORT_SYMBOL(flow_get_u32_dst);
-/* Sort the source and destination IP (and the ports if the IP are the same), +/* Sort the source and destination IP and the ports, * to have consistent hash within the two directions */ static inline void __flow_hash_consistentify(struct flow_keys *keys) @@ -1494,13 +1494,12 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
switch (keys->control.addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - addr_diff = (__force u32)keys->addrs.v4addrs.dst - - (__force u32)keys->addrs.v4addrs.src; - if ((addr_diff < 0) || - (addr_diff == 0 && - ((__force u16)keys->ports.dst < - (__force u16)keys->ports.src))) { + if ((__force u32)keys->addrs.v4addrs.dst < + (__force u32)keys->addrs.v4addrs.src) swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst); + + if ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src) { swap(keys->ports.src, keys->ports.dst); } break; @@ -1508,13 +1507,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) addr_diff = memcmp(&keys->addrs.v6addrs.dst, &keys->addrs.v6addrs.src, sizeof(keys->addrs.v6addrs.dst)); - if ((addr_diff < 0) || - (addr_diff == 0 && - ((__force u16)keys->ports.dst < - (__force u16)keys->ports.src))) { + if (addr_diff < 0) { for (i = 0; i < 4; i++) swap(keys->addrs.v6addrs.src.s6_addr32[i], keys->addrs.v6addrs.dst.s6_addr32[i]); + } + if ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src) { swap(keys->ports.src, keys->ports.dst); } break; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 8f1753875550..88b6120878cd 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1244,7 +1244,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr) return ret; }
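The flow_dissector fix above replaces the signed address subtraction in __flow_hash_consistentify() with plain unsigned comparisons and orders the ports independently of the addresses: when the two addresses differ by 2^31 or more, the old signed test could classify both directions of a flow as "smaller", so forward and reverse packets canonicalized, and therefore hashed, differently. A standalone IPv4-only version of the corrected canonicalization follows; struct demo_key, consistentify() and the sample values are invented for the illustration.

#include <stdio.h>
#include <stdint.h>

struct demo_key {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
};

static void swap32(uint32_t *a, uint32_t *b) { uint32_t t = *a; *a = *b; *b = t; }
static void swap16(uint16_t *a, uint16_t *b) { uint16_t t = *a; *a = *b; *b = t; }

/*
 * Canonicalize the key so both directions of a flow look identical:
 * order the addresses and the ports independently, by plain unsigned
 * comparison (no signed subtraction).
 */
static void consistentify(struct demo_key *k)
{
        if (k->daddr < k->saddr)
                swap32(&k->saddr, &k->daddr);
        if (k->dport < k->sport)
                swap16(&k->sport, &k->dport);
}

int main(void)
{
        struct demo_key fwd = { 0x0a000001, 0x8a000001, 1234, 80 };
        struct demo_key rev = { 0x8a000001, 0x0a000001, 80, 1234 };

        consistentify(&fwd);
        consistentify(&rev);
        printf("same key: %d\n",
               fwd.saddr == rev.saddr && fwd.daddr == rev.daddr &&
               fwd.sport == rev.sport && fwd.dport == rev.dport);
        return 0;
}

Running it prints "same key: 1" for an address pair that differs by exactly 0x80000000, which is the case the signed comparison got wrong.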
-static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) +int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) { struct in_device *in_dev = __in_dev_get_rtnl(dev); const struct in_ifaddr *ifa; @@ -2766,8 +2766,6 @@ void __init devinet_init(void) INIT_HLIST_HEAD(&inet_addr_lst[i]);
register_pernet_subsys(&devinet_ops); - - register_gifconf(PF_INET, inet_gifconf); register_netdevice_notifier(&ip_netdev_notifier);
queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0); diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 26245419ef4a..65b5b05fe38d 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -148,15 +148,37 @@ static int help(struct sk_buff *skb, unsigned int protoff, data = ib_ptr; data_limit = ib_ptr + skb->len - dataoff;
- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 - * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ - while (data < data_limit - (19 + MINMATCHLEN)) { - if (memcmp(data, "\1DCC ", 5)) { + /* Skip any whitespace */ + while (data < data_limit - 10) { + if (*data == ' ' || *data == '\r' || *data == '\n') + data++; + else + break; + } + + /* strlen("PRIVMSG x ")=10 */ + if (data < data_limit - 10) { + if (strncasecmp("PRIVMSG ", data, 8)) + goto out; + data += 8; + } + + /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26 + * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26 + */ + while (data < data_limit - (21 + MINMATCHLEN)) { + /* Find first " :", the start of message */ + if (memcmp(data, " :", 2)) { data++; continue; } + data += 2; + + /* then check that place only for the DCC command */ + if (memcmp(data, "\1DCC ", 5)) + goto out; data += 5; - /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ + /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
iph = ip_hdr(skb); pr_debug("DCC found in master %pI4:%u %pI4:%u\n", @@ -172,7 +194,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, pr_debug("DCC %s detected\n", dccprotos[i]);
/* we have at least - * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid + * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid * data left (== 14/13 bytes) */ if (parse_dcc(data, data_limit, &dcc_ip, &dcc_port, &addr_beg_p, &addr_end_p)) { diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index b83dc9bf0a5d..78fd9122b70c 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, return ret; if (ret == 0) break; - dataoff += *matchoff; + dataoff = *matchoff; } *in_header = 0; } @@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, break; if (ret == 0) return ret; - dataoff += *matchoff; + dataoff = *matchoff; }
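The nf_conntrack_sip hunk just above fixes ct_sip_walk_headers(): the match offset handed back by the header lookup is absolute within the message, so advancing the cursor with dataoff += *matchoff double-counts everything already skipped and can overshoot later headers; the fix assigns the offset instead. The toy scanner below demonstrates the same contract; find_from() and the sample message are made up for the illustration.

#include <stdio.h>
#include <string.h>

/*
 * Hypothetical lookup: returns the absolute offset of 'needle' at or after
 * 'from', or -1 if not found, the same contract as the SIP header helpers.
 */
static int find_from(const char *msg, int from, const char *needle)
{
        const char *p = strstr(msg + from, needle);

        return p ? (int)(p - msg) : -1;
}

int main(void)
{
        const char *msg = "Via: a\r\nVia: b\r\nVia: c\r\n";
        int off, matchoff;

        /* correct: the returned offset is absolute, so assign it */
        for (off = 0; (matchoff = find_from(msg, off, "Via:")) >= 0;
             off = matchoff + 4)            /* 4 == strlen("Via:") */
                printf("header at %d\n", matchoff);

        /*
         * A buggy "off += matchoff" variant would race ahead of the real
         * position once off is no longer zero and miss later headers.
         */
        return 0;
}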
if (in_header) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b8e7e1c5c08a..810995d712ac 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2001,7 +2001,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, u8 policy, u32 flags) { const struct nlattr * const *nla = ctx->nla; - struct nft_stats __percpu *stats = NULL; struct nft_table *table = ctx->table; struct nft_base_chain *basechain; struct net *net = ctx->net; @@ -2015,6 +2014,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, return -EOVERFLOW;
if (nla[NFTA_CHAIN_HOOK]) { + struct nft_stats __percpu *stats = NULL; struct nft_chain_hook hook;
if (flags & NFT_CHAIN_BINDING) @@ -2045,8 +2045,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (err < 0) { nft_chain_release_hook(&hook); kfree(basechain); + free_percpu(stats); return err; } + if (stats) + static_branch_inc(&nft_counters_enabled); } else { if (flags & NFT_CHAIN_BASE) return -EINVAL; @@ -2121,9 +2124,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, goto err_unregister_hook; }
- if (stats) - static_branch_inc(&nft_counters_enabled); - table->use++;
return 0; diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index 79fbf37291f3..51e3953b414c 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb, struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; struct tcphdr _tcph; + bool found = false;
memset(&ctx, 0, sizeof(ctx));
@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
data->genre = f->genre; data->version = f->version; + found = true; break; }
- return true; + return found; } EXPORT_SYMBOL_GPL(nf_osf_find);
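nf_osf_find() above used to return true even when the fingerprint loop fell through without a match, so callers could act on a genre that was never filled in; the fix is the classic found flag that is only set inside the match branch. In miniature, with an invented string lookup standing in for the fingerprint list:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

static const char *genres[] = { "Linux", "Windows", "FreeBSD" };

/* return true only if a genre really matched; never report a bogus hit */
static bool find_genre(const char *probe, const char **out)
{
        bool found = false;
        size_t i;

        for (i = 0; i < sizeof(genres) / sizeof(genres[0]); i++) {
                if (strcmp(probe, genres[i]) == 0) {
                        *out = genres[i];
                        found = true;
                        break;
                }
        }
        return found;   /* the buggy version returned true here unconditionally */
}

int main(void)
{
        const char *genre = NULL;

        if (find_genre("Plan9", &genre))
                printf("matched %s\n", genre);
        else
                printf("no match\n");
        return 0;
}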
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index b8ffb7e4f696..c410a736301b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -2124,6 +2124,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, }
if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { + tfilter_put(tp, fh); NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); err = -EINVAL; goto errout; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index eca525791013..ab8835a72cee 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -65,6 +65,7 @@ struct taprio_sched { u32 flags; enum tk_offsets tk_offset; int clockid; + bool offloaded; atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+ * speeds it's sub-nanoseconds per byte */ @@ -1267,6 +1268,8 @@ static int taprio_enable_offload(struct net_device *dev, goto done; }
+ q->offloaded = true; + done: taprio_offload_free(offload);
@@ -1281,12 +1284,9 @@ static int taprio_disable_offload(struct net_device *dev, struct tc_taprio_qopt_offload *offload; int err;
- if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) + if (!q->offloaded) return 0;
- if (!ops->ndo_setup_tc) - return -EOPNOTSUPP; - offload = taprio_offload_alloc(0); if (!offload) { NL_SET_ERR_MSG(extack, @@ -1302,6 +1302,8 @@ static int taprio_disable_offload(struct net_device *dev, goto out; }
+ q->offloaded = false; + out: taprio_offload_free(offload);
@@ -1904,12 +1906,14 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) { - struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + unsigned int ntx = cl - 1;
- if (!dev_queue) + if (ntx >= dev->num_tx_queues) return NULL;
- return dev_queue->qdisc_sleeping; + return q->qdiscs[ntx]; }
static unsigned long taprio_find(struct Qdisc *sch, u32 classid) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index ef2fd28999ba..bf485a2017a4 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1584,7 +1584,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, static int smcr_buf_map_usable_links(struct smc_link_group *lgr, struct smc_buf_desc *buf_desc, bool is_rmb) { - int i, rc = 0; + int i, rc = 0, cnt = 0;
/* protect against parallel link reconfiguration */ mutex_lock(&lgr->llc_conf_mutex); @@ -1597,9 +1597,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr, rc = -ENOMEM; goto out; } + cnt++; } out: mutex_unlock(&lgr->llc_conf_mutex); + if (!rc && !cnt) + rc = -EINVAL; return rc; }
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 600ea241ead7..79b8d4258fd3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2584,6 +2584,8 @@ static const struct pci_device_id azx_ids[] = { /* 5 Series/3400 */ { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, + { PCI_DEVICE(0x8086, 0x3b57), + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, /* Poulsbo */ { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE }, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 71e11481ba41..7551cdf3b452 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3839,6 +3839,7 @@ static int patch_tegra_hdmi(struct hda_codec *codec) if (err) return err;
+ codec->depop_delay = 10; codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; spec = codec->spec; spec->chmap.ops.chmap_cea_alloc_validate_get_type = diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 78f4f684a3c7..574fe798d512 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6824,6 +6824,8 @@ enum { ALC294_FIXUP_ASUS_GU502_HP, ALC294_FIXUP_ASUS_GU502_PINS, ALC294_FIXUP_ASUS_GU502_VERBS, + ALC294_FIXUP_ASUS_G513_PINS, + ALC285_FIXUP_ASUS_G533Z_PINS, ALC285_FIXUP_HP_GPIO_LED, ALC285_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_GPIO_LED, @@ -8149,6 +8151,24 @@ static const struct hda_fixup alc269_fixups[] = { [ALC294_FIXUP_ASUS_GU502_HP] = { .type = HDA_FIXUP_FUNC, .v.func = alc294_fixup_gu502_hp, + }, + [ALC294_FIXUP_ASUS_G513_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11050 }, /* front HP mic */ + { 0x1a, 0x03a11c30 }, /* rear external mic */ + { 0x21, 0x03211420 }, /* front HP out */ + { } + }, + }, + [ALC285_FIXUP_ASUS_G533Z_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90170120 }, + { } + }, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_G513_PINS, }, [ALC294_FIXUP_ASUS_COEF_1B] = { .type = HDA_FIXUP_VERBS, @@ -8754,6 +8774,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), @@ -8769,6 +8790,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -8912,10 +8934,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), + SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), + SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), - SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", 
ALC256_FIXUP_ASUS_HEADSET_MIC), @@ -8930,14 +8953,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), + SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -9134,6 +9159,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), + SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101), diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c index 953338b9e887..02cd9f75e3d2 100644 --- a/tools/perf/util/genelf.c +++ b/tools/perf/util/genelf.c @@ -251,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, Elf_Data *d; Elf_Scn *scn; Elf_Ehdr *ehdr; + Elf_Phdr *phdr; Elf_Shdr *shdr; uint64_t eh_frame_base_offset; char *strsym = NULL; @@ -285,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, ehdr->e_version = EV_CURRENT; ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
+	/*
+	 * setup program header
+	 */
+	phdr = elf_newphdr(e, 1);
+	phdr[0].p_type = PT_LOAD;
+	phdr[0].p_offset = 0;
+	phdr[0].p_vaddr = 0;
+	phdr[0].p_paddr = 0;
+	phdr[0].p_filesz = csize;
+	phdr[0].p_memsz = csize;
+	phdr[0].p_flags = PF_X | PF_R;
+	phdr[0].p_align = 8;
+
 	/*
 	 * setup text section
 	 */
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index d4137559be05..ac638945b4cb 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -50,8 +50,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 #if GEN_ELF_CLASS == ELFCLASS64
 #define elf_newehdr	elf64_newehdr
+#define elf_newphdr	elf64_newphdr
 #define elf_getshdr	elf64_getshdr
 #define Elf_Ehdr	Elf64_Ehdr
+#define Elf_Phdr	Elf64_Phdr
 #define Elf_Shdr	Elf64_Shdr
 #define Elf_Sym		Elf64_Sym
 #define ELF_ST_TYPE(a)	ELF64_ST_TYPE(a)
@@ -59,8 +61,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 #define ELF_ST_VIS(a)	ELF64_ST_VISIBILITY(a)
 #else
 #define elf_newehdr	elf32_newehdr
+#define elf_newphdr	elf32_newphdr
 #define elf_getshdr	elf32_getshdr
 #define Elf_Ehdr	Elf32_Ehdr
+#define Elf_Phdr	Elf32_Phdr
 #define Elf_Shdr	Elf32_Shdr
 #define Elf_Sym		Elf32_Sym
 #define ELF_ST_TYPE(a)	ELF32_ST_TYPE(a)
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index d8d79a9ec775..3e423a920015 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -2002,8 +2002,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
  * unusual. One significant peculiarity is that the mapping (start -> pgoff)
  * is not the same for the kernel map and the modules map. That happens because
  * the data is copied adjacently whereas the original kcore has gaps. Finally,
- * kallsyms and modules files are compared with their copies to check that
- * modules have not been loaded or unloaded while the copies were taking place.
+ * kallsyms file is compared with its copy to check that modules have not been
+ * loaded or unloaded while the copies were taking place.
  *
  * Return: %0 on success, %-1 on failure.
  */
@@ -2066,9 +2066,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
 		goto out_extract_close;
 	}
-	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
-		goto out_extract_close;
-
 	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
 		goto out_extract_close;
diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
index e714bae473fb..81f31179ac88 100755
--- a/tools/testing/selftests/net/forwarding/sch_red.sh
+++ b/tools/testing/selftests/net/forwarding/sch_red.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 # This test sends one stream of traffic from H1 through a TBF shaper, to a RED
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 578235291e92..c4cce817a452 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -159,6 +159,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
 	/*
@@ -340,6 +344,12 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+	kvm_arch_flush_shadow_all(kvm);
+	kvm_arch_guest_memory_reclaimed(kvm);
+}
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 					       gfp_t gfp_flags)
@@ -489,6 +499,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 	spin_unlock(&kvm->mmu_lock);
+	kvm_arch_guest_memory_reclaimed(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 	return 0;
@@ -592,7 +603,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
@@ -896,7 +907,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
@@ -1238,6 +1249,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * - kvm_is_visible_gfn (mmu_check_root)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
+		kvm_arch_guest_memory_reclaimed(kvm);
 	}
 	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
linux-stable-mirror@lists.linaro.org