I'm announcing the release of the 5.11.14 kernel.
All users of the 5.11 kernel series must upgrade.
The updated 5.11.y git tree can be found at: git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-5.11.y and can be browsed at the normal kernel.org git web browser: https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
thanks,
greg k-h
------------
 Documentation/devicetree/bindings/net/ethernet-controller.yaml | 2
 Makefile | 2
 arch/arm/boot/dts/armada-385-turris-omnia.dts | 4
 arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | 2
 arch/arm/mach-omap2/omap-secure.c | 39 ++
 arch/arm/mach-omap2/omap-secure.h | 1
 arch/arm/mach-omap2/pmic-cpcap.c | 4
 arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h | 2
 arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h | 2
 arch/arm64/boot/dts/marvell/armada-cp11x.dtsi | 6
 arch/ia64/include/asm/ptrace.h | 8
 arch/nds32/mm/cacheflush.c | 2
 arch/parisc/include/asm/cmpxchg.h | 2
 arch/powerpc/kernel/Makefile | 4
 arch/powerpc/kernel/ptrace/Makefile | 4
 arch/powerpc/kernel/ptrace/ptrace-decl.h | 14
 arch/powerpc/kernel/ptrace/ptrace-fpu.c | 10
 arch/powerpc/kernel/ptrace/ptrace-novsx.c | 8
 arch/powerpc/kernel/ptrace/ptrace-view.c | 2
 arch/s390/kernel/cpcmd.c | 6
 arch/x86/include/asm/smp.h | 2
 arch/x86/kernel/smpboot.c | 26 -
 arch/x86/kernel/traps.c | 4
 arch/x86/kvm/mmu/mmu.c | 13
 arch/x86/kvm/mmu/tdp_iter.c | 30 -
 arch/x86/kvm/mmu/tdp_iter.h | 11
 arch/x86/kvm/mmu/tdp_mmu.c | 99 +++--
 arch/x86/kvm/mmu/tdp_mmu.h | 18
 drivers/acpi/processor_idle.c | 4
 drivers/base/dd.c | 8
 drivers/char/agp/Kconfig | 2
 drivers/clk/clk.c | 47 +-
 drivers/clk/qcom/camcc-sc7180.c | 50 +-
 drivers/clk/socfpga/clk-gate.c | 2
 drivers/gpio/gpiolib.c | 12
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 3
 drivers/gpu/drm/i915/display/intel_acpi.c | 22 +
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 6
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 4
 drivers/gpu/drm/msm/msm_drv.c | 1
 drivers/gpu/drm/radeon/radeon_ttm.c | 4
 drivers/gpu/drm/vc4/vc4_crtc.c | 17
 drivers/i2c/busses/i2c-designware-master.c | 1
 drivers/i2c/busses/i2c-jz4780.c | 4
 drivers/i2c/i2c-core-base.c | 7
 drivers/infiniband/core/addr.c | 4
 drivers/infiniband/hw/cxgb4/cm.c | 3
 drivers/infiniband/hw/hfi1/affinity.c | 21 -
 drivers/infiniband/hw/hfi1/hfi.h | 1
 drivers/infiniband/hw/hfi1/init.c | 10
 drivers/infiniband/hw/hfi1/netdev_rx.c | 3
 drivers/infiniband/hw/qedr/verbs.c | 3
 drivers/infiniband/ulp/rtrs/rtrs-clt.c | 2
 drivers/net/can/spi/mcp251x.c | 24 -
 drivers/net/can/usb/peak_usb/pcan_usb_core.c | 6
 drivers/net/dsa/lantiq_gswip.c | 195 ++++++++--
 drivers/net/ethernet/amd/xgbe/xgbe.h | 6
 drivers/net/ethernet/cadence/macb_main.c | 7
 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 23 -
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 3
 drivers/net/ethernet/freescale/gianfar.c | 6
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 4
 drivers/net/ethernet/intel/i40e/i40e.h | 1
 drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 3
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 55 ++
 drivers/net/ethernet/intel/i40e/i40e_main.c | 11
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 12
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 9
 drivers/net/ethernet/intel/i40e/i40e_xsk.c | 4
 drivers/net/ethernet/intel/ice/ice.h | 4
 drivers/net/ethernet/intel/ice/ice_common.c | 2
 drivers/net/ethernet/intel/ice/ice_controlq.h | 4
 drivers/net/ethernet/intel/ice/ice_dcb.c | 76 ++-
 drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 47 +-
 drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 52 +-
 drivers/net/ethernet/intel/ice/ice_ethtool.c | 8
 drivers/net/ethernet/intel/ice/ice_lib.c | 7
 drivers/net/ethernet/intel/ice/ice_main.c | 53 ++
 drivers/net/ethernet/intel/ice/ice_switch.c | 15
 drivers/net/ethernet/intel/ice/ice_txrx.c | 2
 drivers/net/ethernet/intel/ice/ice_type.h | 17
 drivers/net/ethernet/mellanox/mlx5/core/dev.c | 4
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
 drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 36 +
 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 6
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 18
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 22 -
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21 +
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 13
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 15
 drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 7
 drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 7
 drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 1
 drivers/net/ethernet/netronome/nfp/flower/main.h | 8
 drivers/net/ethernet/netronome/nfp/flower/metadata.c | 16
 drivers/net/ethernet/netronome/nfp/flower/offload.c | 48 ++
 drivers/net/geneve.c | 24 +
 drivers/net/ieee802154/atusb.c | 1
 drivers/net/phy/bcm-phy-lib.c | 13
 drivers/net/tun.c | 48 ++
 drivers/net/usb/hso.c | 33 -
 drivers/net/vxlan.c | 18
 drivers/net/wan/hdlc_fr.c | 5
 drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 2
 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 31 -
 drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 3
 drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 35 +
 drivers/of/property.c | 11
 drivers/platform/x86/intel-hid.c | 16
 drivers/ras/cec.c | 15
 drivers/regulator/bd9571mwv-regulator.c | 4
 drivers/remoteproc/pru_rproc.c | 2
 drivers/remoteproc/qcom_pil_info.c | 2
 drivers/scsi/pm8001/pm8001_hwi.c | 8
 drivers/scsi/ufs/ufshcd.c | 31 -
 drivers/soc/fsl/qbman/qman.c | 2
 drivers/target/iscsi/iscsi_target.c | 3
 drivers/thunderbolt/retimer.c | 4
 drivers/usb/usbip/stub_dev.c | 11
 drivers/usb/usbip/usbip_common.h | 3
 drivers/usb/usbip/usbip_event.c | 2
 drivers/usb/usbip/vhci_hcd.c | 1
 drivers/usb/usbip/vhci_sysfs.c | 30 +
 drivers/usb/usbip/vudc_dev.c | 1
 drivers/usb/usbip/vudc_sysfs.c | 5
 drivers/vdpa/mlx5/core/mlx5_vdpa.h | 4
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 40 +-
 drivers/xen/events/events_base.c | 12
 fs/cifs/Kconfig | 3
 fs/cifs/Makefile | 5
 fs/cifs/cifsfs.c | 3
 fs/cifs/connect.c | 17
 fs/direct-io.c | 5
 fs/file.c | 21 -
 fs/hostfs/hostfs_kern.c | 7
 fs/namei.c | 8
 fs/ocfs2/aops.c | 11
 fs/ocfs2/file.c | 8
 include/linux/avf/virtchnl.h | 2
 include/linux/mlx5/mlx5_ifc.h | 10
 include/linux/skmsg.h | 7
 include/linux/virtio_net.h | 2
 include/net/act_api.h | 12
 include/net/netns/xfrm.h | 4
 include/net/red.h | 4
 include/net/sock.h | 9
 include/net/xfrm.h | 4
 include/uapi/linux/can.h | 2
 include/uapi/linux/rfkill.h | 80 +++-
 kernel/bpf/inode.c | 2
 kernel/bpf/stackmap.c | 12
 kernel/bpf/verifier.c | 5
 kernel/gcov/clang.c | 29 -
 kernel/locking/lockdep.c | 2
 kernel/workqueue.c | 2
 mm/percpu-internal.h | 2
 mm/percpu-stats.c | 9
 mm/percpu.c | 14
 net/batman-adv/translation-table.c | 2
 net/can/bcm.c | 10
 net/can/isotp.c | 11
 net/can/raw.c | 14
 net/core/skmsg.c | 12
 net/core/sock.c | 12
 net/core/xdp.c | 3
 net/dsa/dsa2.c | 8
 net/ethtool/eee.c | 4
 net/hsr/hsr_device.c | 1
 net/hsr/hsr_forward.c | 6
 net/ieee802154/nl-mac.c | 7
 net/ieee802154/nl802154.c | 23 -
 net/ipv4/ah4.c | 2
 net/ipv4/esp4.c | 2
 net/ipv4/esp4_offload.c | 17
 net/ipv4/udp.c | 4
 net/ipv6/ah6.c | 2
 net/ipv6/esp6.c | 2
 net/ipv6/esp6_offload.c | 17
 net/ipv6/raw.c | 2
 net/ipv6/route.c | 8
 net/mac80211/mlme.c | 5
 net/mac80211/tx.c | 2
 net/mac802154/llsec.c | 2
 net/mptcp/protocol.c | 100 ++---
 net/ncsi/ncsi-manage.c | 20 -
 net/nfc/llcp_sock.c | 10
 net/openvswitch/conntrack.c | 8
 net/qrtr/qrtr.c | 5
 net/rds/message.c | 3
 net/rfkill/core.c | 7
 net/sched/act_api.c | 48 +-
 net/sched/cls_api.c | 16
 net/sched/sch_teql.c | 3
 net/sctp/ipv6.c | 7
 net/tipc/crypto.c | 3
 net/tipc/socket.c | 2
 net/wireless/nl80211.c | 10
 net/wireless/scan.c | 14
 net/wireless/sme.c | 2
 net/xfrm/xfrm_compat.c | 12
 net/xfrm/xfrm_device.c | 2
 net/xfrm/xfrm_interface.c | 3
 net/xfrm/xfrm_output.c | 10
 net/xfrm/xfrm_state.c | 10
 security/selinux/ss/avtab.c | 101 +----
 security/selinux/ss/avtab.h | 2
 security/selinux/ss/conditional.c | 12
 security/selinux/ss/services.c | 157 ++++--
 security/selinux/ss/sidtab.c | 21 +
 security/selinux/ss/sidtab.h | 4
 sound/drivers/aloop.c | 11
 sound/pci/hda/patch_conexant.c | 1
 sound/pci/hda/patch_realtek.c | 16
 sound/soc/codecs/wm8960.c | 8
 sound/soc/intel/atom/sst-mfld-platform-pcm.c | 6
 sound/soc/sof/intel/hda-dsp.c | 15
 sound/soc/sunxi/sun4i-codec.c | 5
 tools/lib/bpf/ringbuf.c | 2
 tools/lib/bpf/xsk.c | 57 +-
 tools/perf/builtin-inject.c | 2
 tools/perf/util/block-info.c | 6
 224 files changed, 2058 insertions(+), 1018 deletions(-)
Adrian Hunter (1): perf inject: Fix repipe usage
Ahmed S. Darwish (1): net: xfrm: Localize sequence counter per network namespace
Al Viro (2):
      LOOKUP_MOUNTPOINT: we are cleaning "jumped" flag too late
      hostfs: fix memory handling in follow_link()
Alex Deucher (1): drm/amdgpu/smu7: fix CAC setting on TOPAZ
Alexander Aring (8):
      net: ieee802154: nl-mac: fix check on panid
      net: ieee802154: fix nl802154 del llsec key
      net: ieee802154: fix nl802154 del llsec dev
      net: ieee802154: fix nl802154 add llsec key
      net: ieee802154: fix nl802154 del llsec devkey
      net: ieee802154: forbid monitor for set llsec params
      net: ieee802154: forbid monitor for del llsec seclevel
      net: ieee802154: stop dump llsec params for monitors
Alexander Gordeev (1): s390/cpcmd: fix inline assembly register clobbering
Andy Shevchenko (2):
      i2c: designware: Adjust bus_freq_hz when refuse high speed mode set
      gpiolib: Read "gpio-line-names" from a firmware node
Anirudh Rayabharam (1): net: hso: fix null-ptr-deref during tty device unregistration
Anirudh Venkataramanan (2):
      ice: Continue probe on link/PHY errors
      ice: Use port number instead of PF ID for WoL
Antoine Tenart (2):
      vxlan: do not modify the shared tunnel info when PMTU triggers an ICMP reply
      geneve: do not modify the shared tunnel info when PMTU triggers an ICMP reply
Ariel Levkovich (1): net/mlx5e: Fix mapping of ct_label zero
Arkadiusz Kubalewski (4):
      i40e: Fix sparse warning: missing error code 'err'
      i40e: Fix sparse error: 'vsi->netdev' could be null
      i40e: Fix sparse error: uninitialized symbol 'ring'
      i40e: Fix sparse errors in i40e_txrx.c
Arnd Bergmann (3):
      remoteproc: qcom: pil_info: avoid 64-bit division
      soc/fsl: qbman: fix conflicting alignment attributes
      lockdep: Address clang -Wformat warning printing for %hd
Aya Levin (3):
      net/mlx5e: Fix ethtool indication of connector type
      net/mlx5: Fix PPLM register mapping
      net/mlx5: Fix PBMC register mapping
Bastian Germann (1): ASoC: sunxi: sun4i-codec: fill ASoC card owner
Ben Gardon (5):
      KVM: x86/mmu: change TDP MMU yield function returns to match cond_resched
      KVM: x86/mmu: Merge flush and non-flush tdp_mmu_iter_cond_resched
      KVM: x86/mmu: Rename goal_gfn to next_last_level_gfn
      KVM: x86/mmu: Ensure forward progress when yielding in TDP MMU iter
      KVM: x86/mmu: Yield in TDU MMU iter even if no SPTES changed
Ben Greear (1): mac80211: fix time-is-after bug in mlme
Bruce Allan (1): ice: fix memory allocation call
Can Guo (2):
      scsi: ufs: core: Fix task management request completion timeout
      scsi: ufs: core: Fix wrong Task Tag used in task management request UPIUs
Carlos Leija (1): ARM: OMAP4: PM: update ROM return address for OSWR and OFF
Chinh T Cao (2):
      ice: Refactor DCB related variables out of the ice_port_info struct
      ice: Recognize 860 as iSCSI port in CEE mode
Christian Brauner (1): file: fix close_range() for unshare+cloexec
Christophe Leroy (2):
      powerpc/vdso: Make sure vdso_wrapper.o is rebuilt everytime vdso.so is rebuilt
      powerpc/ptrace: Don't return error when getting/setting FP regs without CONFIG_PPC_FPU_REGS
Ciara Loftus (3):
      libbpf: Ensure umem pointer is non-NULL before dereferencing
      libbpf: Restore umem state after socket create failure
      libbpf: Only create rx and tx XDP rings when necessary
Claudiu Beznea (1): net: macb: restore cmp registers on resume path
Claudiu Manoil (1): gianfar: Handle error code at MAC address change
Dan Carpenter (2):
      thunderbolt: Fix a leak in tb_retimer_add()
      thunderbolt: Fix off by one in tb_port_find_retimer()
Daniel Jurgens (1): net/mlx5: Don't request more than supported EQs
Dave Ertman (1): ice: remove DCBNL_DEVRESET bit from PF state
Dave Marchevsky (1): bpf: Refcount task stack in bpf_get_task_stack
Dmitry Baryshkov (1): drm/msm: a6xx: fix version check for the A650 SQE microcode
Dmitry Safonov (1): xfrm/compat: Cleanup WARN()s that can be user-triggered
Dom Cobley (1): drm/vc4: crtc: Reduce PV fifo threshold on hvs4
Du Cheng (1): cfg80211: remove WARN_ON() in cfg80211_sme_connect
Eli Cohen (3):
      vdpa/mlx5: Fix suspend/resume index restoration
      net/mlx5: Fix HW spec violation configuring uplink
      vdpa/mlx5: Fix wrong use of bit numbers
Eric Dumazet (2):
      net: ensure mac header is set in virtio_net_hdr_to_skb()
      sch_red: fix off-by-one checks in red_check_params()
Eryk Rybak (2):
      i40e: Fix kernel oops when i40e driver removes VF's
      i40e: Fix display statistics for veb_tc
Evan Nimmo (1): xfrm: Use actual socket sk instead of skb socket for xfrm_output_resume
Eyal Birger (1): xfrm: interface: fix ipv4 pmtu check to honor ip header df
Fabio Pricoco (1): ice: Increase control queue timeout
Florian Fainelli (1): net: phy: broadcom: Only advertise EEE for supported modes
Gao Xiang (1): parisc: avoid a warning on u8 cast for cmpxchg on u8 pointers
Geert Uytterhoeven (1): regulator: bd9571mwv: Fix AVS and DVFS voltage range
Greg Kroah-Hartman (1): Linux 5.11.14
Gregory CLEMENT (1): Revert "arm64: dts: marvell: armada-cp110: Switch to per-port SATA interrupts"
Grzegorz Siwik (1): i40e: Fix parameters in aq_get_phy_register()
Guangbin Huang (1): net: hns3: clear VF down state bit before request link status
Guennadi Liakhovetski (1): ASoC: SOF: Intel: HDA: fix core status verification
Hans de Goede (2):
      ASoC: intel: atom: Stop advertising non working S24LE support
      platform/x86: intel-hid: Fix spurious wakeups caused by tablet-mode events during suspend
Helge Deller (1): parisc: parisc-agp requires SBA IOMMU driver
Ido Schimmel (1): mlxsw: spectrum: Fix ECN marking in tunnel decapsulation
Ilya Lipnitskiy (1): of: property: fw_devlink: do not link ".*,nr-gpios"
Ilya Maximets (1): openvswitch: fix send of uninitialized stack memory in ct limit reply
Jacek Bułatek (1): ice: Fix for dereference of NULL pointer
Jack Qiu (1): fs: direct-io: fix missing sdio->boundary
Jin Yao (1): perf report: Fix wrong LBR block sorting
Johannes Berg (6):
      rfkill: revert back to old userspace API by default
      iwlwifi: pcie: properly set LTR workarounds on 22000 devices
      nl80211: fix beacon head validation
      nl80211: fix potential leak of ACL params
      cfg80211: check S1G beacon compat element length
      mac80211: fix TXQ AC confusion
John Fastabend (2):
      bpf, sockmap: Fix sk->prot unhash op reset
      bpf, sockmap: Fix incorrect fwd_alloc accounting
Jonas Holmberg (1): ALSA: aloop: Fix initialization of controls
Kalyan Thota (1): drm/msm/disp/dpu1: program 3d_merge only if block is attached
Kamal Heib (1): RDMA/qedr: Fix kernel panic when trying to access recv_cq
Krzysztof Goreczny (1): ice: prevent ice_open and ice_stop during reset
Krzysztof Kozlowski (1): clk: socfpga: fix iomem pointer cast on 64-bit
Kumar Kartikeya Dwivedi (1): net: sched: bump refcount for new action in ACT replace mode
Kurt Kanzenbach (1): net: hsr: Reset MAC header for Tx path
Leon Romanovsky (1): RDMA/addr: Be strict with gid size
Loic Poulain (1): net: qrtr: Fix memory leak on qrtr_tx_wait failure
Lorenz Bauer (1): bpf: link: Refuse non-O_RDWR flags in BPF_OBJ_GET
Luca Coelho (1): iwlwifi: fix 11ax disabled bit in the regulatory capability flags
Luca Fancellu (1): xen/evtchn: Change irq_info lock to raw_spinlock_t
Lukasz Bartosik (2):
      clk: fix invalid usage of list cursor in register
      clk: fix invalid usage of list cursor in unregister
Lv Yunlong (5):
      ethernet/netronome/nfp: Fix a use after free in nfp_bpf_ctrl_msg_rx
      drivers/net/wan/hdlc_fr: Fix a double free in pvc_xmit
      ethernet: myri10ge: Fix a use after free in myri10ge_sw_tso
      net:tipc: Fix a double free in tipc_sk_mcast_rcv
      net/rds: Fix a use after free in rds_message_map_pages
Maciej Żenczykowski (1): net-ipv6: bugfix - raw & sctp - switch to ipv6_can_nonlocal_bind()
Maciek Borzecki (1): cifs: escape spaces in share names
Magnus Karlsson (1): i40e: fix receiving of single packets in xsk zero-copy mode
Maor Dickman (1): net/mlx5: Delete auxiliary bus driver eth-rep first
Marc Kleine-Budde (2):
      can: uapi: can.h: mark union inside struct can_frame packed
      can: mcp251x: fix support for half duplex SPI host controllers
Marek Behún (1): ARM: dts: turris-omnia: configure LED[2]/INTn pin as interrupt pin
Martin Blumenstingl (3):
      net: dsa: lantiq_gswip: Let GSWIP automatically set the xMII clock
      net: dsa: lantiq_gswip: Don't use PHY auto polling
      net: dsa: lantiq_gswip: Configure all remaining GSWIP_MII_CFG bits
Mateusz Palczewski (1): i40e: Added Asym_Pause to supported link modes
Maxim Kochetkov (1): net: dsa: Fix type was not set for devlink port
Md Haris Iqbal (1): RDMA/rtrs-clt: Close rtrs client conn before destroying rtrs clt session files
Mike Marciniszyn (1): IB/hfi1: Fix probe time panic when AIP is enabled with a buggy BIOS
Mike Rapoport (1): nds32: flush_dcache_page: use page_mapping_file to avoid races with swapoff
Milton Miller (1): net/ncsi: Avoid channel_monitor hrtimer deadlock
Muhammad Usama Anjum (1): net: ipv6: check for validity before dereferencing cfg->fc_nlinfo.nlh
Nick Desaulniers (1): gcov: re-fix clang-11+ support
Norbert Ciosek (1): virtchnl: Fix layout of RSS structures
Norman Maurer (1): net: udp: Add support for getsockopt(..., ..., UDP_GRO, ..., ...);
Oliver Hartkopp (2):
      can: bcm/raw: fix msg_namelen values depending on CAN_REQUIRED_SIZE
      can: isotp: fix msg_namelen values depending on CAN_REQUIRED_SIZE
Oliver Stäbler (1): arm64: dts: imx8mm/q: Fix pad control of SD1_DATA0
Ondrej Mosnacek (3):
      selinux: make nslot handling in avtab more robust
      selinux: fix cond_list corruption when changing booleans
      selinux: fix race between old and new sidtab
Ong Boon Leong (1): xdp: fix xdp_return_frame() kernel BUG throw for page_pool memory model
Paolo Abeni (3):
      net: let skb_orphan_partial wake-up waiters.
      mptcp: forbit mcast-related sockopt on MPTCP sockets
      mptcp: revert "mptcp: provide subflow aware release function"
Paolo Bonzini (1): KVM: x86/mmu: preserve pending TLB flush across calls to kvm_tdp_mmu_zap_sp
Pavel Skripkin (3):
      drivers: net: fix memory leak in atusb_probe
      drivers: net: fix memory leak in peak_usb_create_dev
      net: mac802154: Fix general protection fault
Pavel Tikhomirov (1): net: sched: sch_teql: fix null-pointer dereference
Pedro Tammela (1): libbpf: Fix bail out from 'ringbuf_process_ring()' on error
Phillip Potter (1): net: tun: set tun->dev->addr_len during TUNSETLINK processing
Potnuri Bharat Teja (1): RDMA/cxgb4: check for ipv6 address properly while destroying listener
Raed Salem (1): net/mlx5: Fix placement of log_max_flow_counter
Rafał Miłecki (1): dt-bindings: net: ethernet-controller: fix typo in NVMEM
Rahul Lakkireddy (1): cxgb4: avoid collecting SGE_QBASE regs during traffic
Robert Malz (1): ice: Cleanup fltr list in case of allocation issues
Roman Bolshakov (1): scsi: target: iscsi: Fix zero tag inside a trace event
Roman Gushchin (1): percpu: make pcpu_nr_empty_pop_pages per chunk type
Rui Salvaterra (1): ARM: dts: turris-omnia: fix hardware buffer management
Saravana Kannan (1): driver core: Fix locking bug in deferred_probe_timeout_work_func()
Sean Christopherson (3):
      KVM: x86/mmu: Ensure TLBs are flushed when yielding during GFN range zap
      KVM: x86/mmu: Ensure TLBs are flushed for TDP MMU during NX zapping
      KVM: x86/mmu: Don't allow TDP MMU to yield when recovering NX pages
Sergei Trofimovich (1): ia64: fix user_stack_pointer() for ptrace()
Shengjiu Wang (1): ASoC: wm8960: Fix wrong bclk and lrclk with pll enabled for some chips
Shuah Khan (4):
      usbip: add sysfs_lock to synchronize sysfs code paths
      usbip: stub-dev synchronize sysfs code paths
      usbip: vudc synchronize sysfs code paths
      usbip: synchronize event handler with sysfs code paths
Shyam Prasad N (1): cifs: On cifs_reconnect, resolve the hostname again.
Shyam Sundar S K (1): amd-xgbe: Update DMA coherency values
Si-Wei Liu (1): vdpa/mlx5: should exclude header length and fcs from mtu
Stefan Riedmueller (1): ARM: dts: imx6: pbab01: Set vmmc supply for both SD interfaces
Steffen Klassert (2):
      xfrm: Fix NULL pointer dereference on policy lookup
      xfrm: Provide private skb extensions for segmented and hw offloaded ESP packets
Stephen Boyd (1): drm/msm: Set drvdata to NULL when msm_drm_init() fails
Suman Anna (1): remoteproc: pru: Fix firmware loading crashes on K3 SoCs
Takashi Iwai (3):
      ALSA: hda/realtek: Fix speaker amp setup on Acer Aspire E1
      ALSA: hda/conexant: Apply quirk for another HP ZBook G5 model
      drm/i915: Fix invalid access to ACPI _DSM objects
Taniya Das (1): clk: qcom: camcc: Update the clock ops for the SC7180
Tariq Toukan (1): net/mlx5e: Guarantee room for XSK wakeup NOP on async ICOSQ
Tetsuo Handa (1): batman-adv: initialize "struct batadv_tvlv_tt_vlan_data"->reserved field
Thomas Tai (1): x86/traps: Correct exc_general_protection() and math_error() return paths
Toke Høiland-Jørgensen (1): bpf: Enforce that struct_ops programs be GPL-only
Tony Lindgren (1): ARM: OMAP4: Fix PMIC voltage domains for bionic
Viswas G (1): scsi: pm80xx: Fix chip initialization failure
Vitaly Kuznetsov (1): ACPI: processor: Fix build when CONFIG_ACPI_PROCESSOR=m
Vlad Buslov (3):
      net: sched: fix action overwrite reference counting
      net: sched: fix err handler in tcf_action_init()
      Revert "net: sched: bump refcount for new action in ACT replace mode"
Wengang Wang (1): ocfs2: fix deadlock between setattr and dio_end_io_write
William Roche (1): RAS/CEC: Correct ce_add_elem()'s returned values
Wolfram Sang (1): i2c: turn recovery error on init to debug
Wong Vee Khee (1): ethtool: fix incorrect datatype in set_eee ops
Xiaoming Ni (4):
      nfc: fix refcount leak in llcp_sock_bind()
      nfc: fix refcount leak in llcp_sock_connect()
      nfc: fix memory leak in llcp_sock_connect()
      nfc: Avoid endless loops caused by repeated llcp_sock_connect()
Xin Long (2):
      esp: delete NETIF_F_SCTP_CRC bit from features for esp offload
      tipc: increment the tmp aead refcnt before attaching it
Yinjun Zhang (1): nfp: flower: ignore duplicate merge hints from FW
Yongxin Liu (1): ice: fix memory leak of aRFS after resuming from suspend
Yunjian Wang (1): net: cls_api: Fix uninitialised struct field bo->unlocked_driver_cb
Zqiang (1): workqueue: Move the position of debug_work_activate() in __queue_work()
xinhui pan (2):
      drm/radeon: Fix size overflow
      drm/amdgpu: Fix size overflow
周琰杰 (Zhou Yanjie) (1): I2C: JZ4780: Fix bug for Ingenic X1000.
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml index 880e55f7a4b1..a7ee05896564 100644 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml @@ -49,7 +49,7 @@ properties: description: Reference to an nvmem node for the MAC address
- nvmem-cells-names: + nvmem-cell-names: const: mac-address
phy-connection-type: diff --git a/Makefile b/Makefile index 1be83283e032..9116941553b8 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 11 -SUBLEVEL = 13 +SUBLEVEL = 14 EXTRAVERSION = NAME = 💕 Valentine's Day Edition 💕
diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts index 646a06420c77..5bd6a66d2c2b 100644 --- a/arch/arm/boot/dts/armada-385-turris-omnia.dts +++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts @@ -32,7 +32,8 @@ soc { ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000 MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000 MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000 - MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>; + MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000 + MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
internal-regs {
@@ -389,6 +390,7 @@ &mdio { phy1: ethernet-phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; reg = <1>; + marvell,reg-init = <3 18 0 0x4985>;
/* irq is connected to &pcawan pin 7 */ }; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index 7a1e53195785..f28a96fcf23e 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -433,6 +433,7 @@ &usdhc2 { pinctrl-0 = <&pinctrl_usdhc2>; cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; + vmmc-supply = <&vdd_sd1_reg>; status = "disabled"; };
@@ -442,5 +443,6 @@ &usdhc3 { &pinctrl_usdhc3_cdwp>; cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; + vmmc-supply = <&vdd_sd0_reg>; status = "disabled"; }; diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c index f70d561f37f7..0659ab4cb0af 100644 --- a/arch/arm/mach-omap2/omap-secure.c +++ b/arch/arm/mach-omap2/omap-secure.c @@ -9,6 +9,7 @@ */
#include <linux/arm-smccc.h> +#include <linux/cpu_pm.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> @@ -20,6 +21,7 @@
#include "common.h" #include "omap-secure.h" +#include "soc.h"
static phys_addr_t omap_secure_memblock_base;
@@ -213,3 +215,40 @@ void __init omap_secure_init(void) { omap_optee_init_check(); } + +/* + * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return + * address after MMU has been re-enabled after CPU1 has been woken up again. + * Otherwise the ROM code will attempt to use the earlier physical return + * address that got set with MMU off when waking up CPU1. Only used on secure + * devices. + */ +static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v) +{ + switch (cmd) { + case CPU_CLUSTER_PM_EXIT: + omap_secure_dispatcher(OMAP4_PPA_SERVICE_0, + FLAG_START_CRITICAL, + 0, 0, 0, 0, 0); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block secure_notifier_block = { + .notifier_call = cpu_notifier, +}; + +static int __init secure_pm_init(void) +{ + if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx()) + return 0; + + cpu_pm_register_notifier(&secure_notifier_block); + + return 0; +} +omap_arch_initcall(secure_pm_init); diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h index 4aaa95706d39..172069f31616 100644 --- a/arch/arm/mach-omap2/omap-secure.h +++ b/arch/arm/mach-omap2/omap-secure.h @@ -50,6 +50,7 @@ #define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107
/* Secure PPA(Primary Protected Application) APIs */ +#define OMAP4_PPA_SERVICE_0 0x21 #define OMAP4_PPA_L2_POR_INDEX 0x23 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25
diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c index 09076ad0576d..668dc84fd31e 100644 --- a/arch/arm/mach-omap2/pmic-cpcap.c +++ b/arch/arm/mach-omap2/pmic-cpcap.c @@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void) omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);
if (of_machine_is_compatible("motorola,droid-bionic")) { - voltdm = voltdm_lookup("mpu"); + voltdm = voltdm_lookup("core"); omap_voltage_register_pmic(voltdm, &omap_cpcap_core);
- voltdm = voltdm_lookup("mpu"); + voltdm = voltdm_lookup("iva"); omap_voltage_register_pmic(voltdm, &omap_cpcap_iva); } else { voltdm = voltdm_lookup("core"); diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h index 5ccc4cc91959..a003e6af3353 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -124,7 +124,7 @@ #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0 -#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0 diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h index b94b02080a34..68e8fa172974 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h @@ -130,7 +130,7 @@ #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0 -#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0 +#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0 diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi index 994a2fce449a..1e37ae181acf 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi @@ -300,9 +300,11 @@ CP11X_LABEL(usb3_1): usb@510000 { };
CP11X_LABEL(sata0): sata@540000 { - compatible = "marvell,armada-8k-ahci"; + compatible = "marvell,armada-8k-ahci", + "generic-ahci"; reg = <0x540000 0x30000>; dma-coherent; + interrupts = <107 IRQ_TYPE_LEVEL_HIGH>; clocks = <&CP11X_LABEL(clk) 1 15>, <&CP11X_LABEL(clk) 1 16>; #address-cells = <1>; @@ -310,12 +312,10 @@ CP11X_LABEL(sata0): sata@540000 { status = "disabled";
sata-port@0 { - interrupts = <109 IRQ_TYPE_LEVEL_HIGH>; reg = <0>; };
sata-port@1 { - interrupts = <107 IRQ_TYPE_LEVEL_HIGH>; reg = <1>; }; }; diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index b3aa46090101..08179135905c 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -54,8 +54,7 @@
static inline unsigned long user_stack_pointer(struct pt_regs *regs) { - /* FIXME: should this be bspstore + nr_dirty regs? */ - return regs->ar_bspstore; + return regs->r12; }
static inline int is_syscall_success(struct pt_regs *regs) @@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs) unsigned long __ip = instruction_pointer(regs); \ (__ip & ~3UL) + ((__ip & 3UL) << 2); \ }) -/* - * Why not default? Because user_stack_pointer() on ia64 gives register - * stack backing store instead... - */ -#define current_user_stack_pointer() (current_pt_regs()->r12)
/* given a pointer to a task_struct, return the user's pt_regs */ # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c index 6eb98a7ad27d..ad5344ef5d33 100644 --- a/arch/nds32/mm/cacheflush.c +++ b/arch/nds32/mm/cacheflush.c @@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page) { struct address_space *mapping;
- mapping = page_mapping(page); + mapping = page_mapping_file(page); if (mapping && !mapping_mapped(mapping)) set_bit(PG_dcache_dirty, &page->flags); else { diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index cf5ee9b0b393..84ee232278a6 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); - case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 79ee7750937d..b31e2160b233 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -191,3 +191,7 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE targets += prom_init_check
clean-files := vmlinux.lds + +# Force dependency (incbin is bad) +$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg +$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg diff --git a/arch/powerpc/kernel/ptrace/Makefile b/arch/powerpc/kernel/ptrace/Makefile index 8ebc11d1168d..77abd1a5a508 100644 --- a/arch/powerpc/kernel/ptrace/Makefile +++ b/arch/powerpc/kernel/ptrace/Makefile @@ -6,11 +6,11 @@ CFLAGS_ptrace-view.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y += ptrace.o ptrace-view.o -obj-$(CONFIG_PPC_FPU_REGS) += ptrace-fpu.o +obj-y += ptrace-fpu.o obj-$(CONFIG_COMPAT) += ptrace32.o obj-$(CONFIG_VSX) += ptrace-vsx.o ifneq ($(CONFIG_VSX),y) -obj-$(CONFIG_PPC_FPU_REGS) += ptrace-novsx.o +obj-y += ptrace-novsx.o endif obj-$(CONFIG_ALTIVEC) += ptrace-altivec.o obj-$(CONFIG_SPE) += ptrace-spe.o diff --git a/arch/powerpc/kernel/ptrace/ptrace-decl.h b/arch/powerpc/kernel/ptrace/ptrace-decl.h index 3487f2c9735c..eafe5f0f6289 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-decl.h +++ b/arch/powerpc/kernel/ptrace/ptrace-decl.h @@ -165,22 +165,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data); extern const struct user_regset_view user_ppc_native_view;
/* ptrace-fpu */ -#ifdef CONFIG_PPC_FPU_REGS int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data); int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data); -#else -static inline int -ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data) -{ - return -EIO; -} - -static inline int -ptrace_put_fpr(struct task_struct *child, int index, unsigned long data) -{ - return -EIO; -} -#endif
/* ptrace-(no)adv */ void ppc_gethwdinfo(struct ppc_debug_info *dbginfo); diff --git a/arch/powerpc/kernel/ptrace/ptrace-fpu.c b/arch/powerpc/kernel/ptrace/ptrace-fpu.c index 8301cb52dd99..5dca19361316 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-fpu.c +++ b/arch/powerpc/kernel/ptrace/ptrace-fpu.c @@ -8,32 +8,42 @@
int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data) { +#ifdef CONFIG_PPC_FPU_REGS unsigned int fpidx = index - PT_FPR0; +#endif
if (index > PT_FPSCR) return -EIO;
+#ifdef CONFIG_PPC_FPU_REGS flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long)); else *data = child->thread.fp_state.fpscr; +#else + *data = 0; +#endif
return 0; }
int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data) { +#ifdef CONFIG_PPC_FPU_REGS unsigned int fpidx = index - PT_FPR0; +#endif
if (index > PT_FPSCR) return -EIO;
+#ifdef CONFIG_PPC_FPU_REGS flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); else child->thread.fp_state.fpscr = data; +#endif
return 0; } diff --git a/arch/powerpc/kernel/ptrace/ptrace-novsx.c b/arch/powerpc/kernel/ptrace/ptrace-novsx.c index b3b36835658a..7433f3db979a 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-novsx.c +++ b/arch/powerpc/kernel/ptrace/ptrace-novsx.c @@ -21,12 +21,16 @@ int fpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { +#ifdef CONFIG_PPC_FPU_REGS BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != offsetof(struct thread_fp_state, fpr[32]));
flush_fp_to_thread(target);
return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64)); +#else + return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64)); +#endif }
/* @@ -46,6 +50,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { +#ifdef CONFIG_PPC_FPU_REGS BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != offsetof(struct thread_fp_state, fpr[32]));
@@ -53,4 +58,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fp_state, 0, -1); +#else + return 0; +#endif } diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c index 2bad8068f598..6ccffc65ac97 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-view.c +++ b/arch/powerpc/kernel/ptrace/ptrace-view.c @@ -522,13 +522,11 @@ static const struct user_regset native_regsets[] = { .size = sizeof(long), .align = sizeof(long), .regset_get = gpr_get, .set = gpr_set }, -#ifdef CONFIG_PPC_FPU_REGS [REGSET_FPR] = { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(double), .align = sizeof(double), .regset_get = fpr_get, .set = fpr_set }, -#endif #ifdef CONFIG_ALTIVEC [REGSET_VMX] = { .core_note_type = NT_PPC_VMX, .n = 34, diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index af013b4244d3..2da027359798 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
static int diag8_response(int cmdlen, char *response, int *rlen) { + unsigned long _cmdlen = cmdlen | 0x40000000L; + unsigned long _rlen = *rlen; register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; register unsigned long reg3 asm ("3") = (addr_t) response; - register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L; - register unsigned long reg5 asm ("5") = *rlen; + register unsigned long reg4 asm ("4") = _cmdlen; + register unsigned long reg5 asm ("5") = _rlen;
asm volatile( " diag %2,%0,0x8\n" diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 57ef2094af93..630ff08532be 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -132,7 +132,7 @@ void native_play_dead(void); void play_dead_common(void); void wbinvd_on_cpu(int cpu); int wbinvd_on_all_cpus(void); -bool wakeup_cpu0(void); +void cond_wakeup_cpu0(void);
void native_smp_send_reschedule(int cpu); void native_send_call_func_ipi(const struct cpumask *mask); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f877150a91da..16703c35a944 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1659,13 +1659,17 @@ void play_dead_common(void) local_irq_disable(); }
-bool wakeup_cpu0(void) +/** + * cond_wakeup_cpu0 - Wake up CPU0 if needed. + * + * If NMI wants to wake up CPU0, start CPU0. + */ +void cond_wakeup_cpu0(void) { if (smp_processor_id() == 0 && enable_start_cpu0) - return true; - - return false; + start_cpu0(); } +EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
/* * We need to flush the caches before going to sleep, lest we have @@ -1734,11 +1738,8 @@ static inline void mwait_play_dead(void) __monitor(mwait_ptr, 0, 0); mb(); __mwait(eax, 0); - /* - * If NMI wants to wake up CPU0, start CPU0. - */ - if (wakeup_cpu0()) - start_cpu0(); + + cond_wakeup_cpu0(); } }
@@ -1749,11 +1750,8 @@ void hlt_play_dead(void)
while (1) { native_halt(); - /* - * If NMI wants to wake up CPU0, start CPU0. - */ - if (wakeup_cpu0()) - start_cpu0(); + + cond_wakeup_cpu0(); } }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ac1874a2a70e..651e3e508959 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -556,7 +556,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection) tsk->thread.trap_nr = X86_TRAP_GP;
if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0)) - return; + goto exit;
show_signal(tsk, SIGSEGV, "", desc, regs, error_code); force_sig(SIGSEGV); @@ -1057,7 +1057,7 @@ static void math_error(struct pt_regs *regs, int trapnr) goto exit;
if (fixup_vdso_exception(regs, trapnr, 0, 0)) - return; + goto exit;
force_sig_fault(SIGFPE, si_code, (void __user *)uprobe_get_trap_addr(regs)); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ed861245ecf0..86cedf32526a 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5985,6 +5985,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) struct kvm_mmu_page *sp; unsigned int ratio; LIST_HEAD(invalid_list); + bool flush = false; ulong to_zap;
rcu_idx = srcu_read_lock(&kvm->srcu); @@ -6005,20 +6006,20 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) struct kvm_mmu_page, lpage_disallowed_link); WARN_ON_ONCE(!sp->lpage_disallowed); - if (sp->tdp_mmu_page) - kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, - sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level)); - else { + if (sp->tdp_mmu_page) { + flush |= kvm_tdp_mmu_zap_sp(kvm, sp); + } else { kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); WARN_ON_ONCE(sp->lpage_disallowed); }
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { - kvm_mmu_commit_zap_page(kvm, &invalid_list); + kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); cond_resched_lock(&kvm->mmu_lock); + flush = false; } } - kvm_mmu_commit_zap_page(kvm, &invalid_list); + kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, rcu_idx); diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index 87b7e16911db..1a09d212186b 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -22,21 +22,22 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
/* * Sets a TDP iterator to walk a pre-order traversal of the paging structure - * rooted at root_pt, starting with the walk to translate goal_gfn. + * rooted at root_pt, starting with the walk to translate next_last_level_gfn. */ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, - int min_level, gfn_t goal_gfn) + int min_level, gfn_t next_last_level_gfn) { WARN_ON(root_level < 1); WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
- iter->goal_gfn = goal_gfn; + iter->next_last_level_gfn = next_last_level_gfn; + iter->yielded_gfn = iter->next_last_level_gfn; iter->root_level = root_level; iter->min_level = min_level; iter->level = root_level; iter->pt_path[iter->level - 1] = root_pt;
- iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level); + iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); tdp_iter_refresh_sptep(iter);
iter->valid = true; @@ -82,7 +83,7 @@ static bool try_step_down(struct tdp_iter *iter)
iter->level--; iter->pt_path[iter->level - 1] = child_pt; - iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level); + iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); tdp_iter_refresh_sptep(iter);
return true; @@ -106,7 +107,7 @@ static bool try_step_side(struct tdp_iter *iter) return false;
iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); - iter->goal_gfn = iter->gfn; + iter->next_last_level_gfn = iter->gfn; iter->sptep++; iter->old_spte = READ_ONCE(*iter->sptep);
@@ -158,23 +159,6 @@ void tdp_iter_next(struct tdp_iter *iter) iter->valid = false; }
-/* - * Restart the walk over the paging structure from the root, starting from the - * highest gfn the iterator had previously reached. Assumes that the entire - * paging structure, except the root page, may have been completely torn down - * and rebuilt. - */ -void tdp_iter_refresh_walk(struct tdp_iter *iter) -{ - gfn_t goal_gfn = iter->goal_gfn; - - if (iter->gfn > goal_gfn) - goal_gfn = iter->gfn; - - tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], - iter->root_level, iter->min_level, goal_gfn); -} - u64 *tdp_iter_root_pt(struct tdp_iter *iter) { return iter->pt_path[iter->root_level - 1]; diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index 47170d0dc98e..d480c540ee27 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -15,7 +15,13 @@ struct tdp_iter { * The iterator will traverse the paging structure towards the mapping * for this GFN. */ - gfn_t goal_gfn; + gfn_t next_last_level_gfn; + /* + * The next_last_level_gfn at the time when the thread last + * yielded. Only yielding when the next_last_level_gfn != + * yielded_gfn helps ensure forward progress. + */ + gfn_t yielded_gfn; /* Pointers to the page tables traversed to reach the current SPTE */ u64 *pt_path[PT64_ROOT_MAX_LEVEL]; /* A pointer to the current SPTE */ @@ -52,9 +58,8 @@ struct tdp_iter { u64 *spte_to_child_pt(u64 pte, int level);
void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level, - int min_level, gfn_t goal_gfn); + int min_level, gfn_t next_last_level_gfn); void tdp_iter_next(struct tdp_iter *iter); -void tdp_iter_refresh_walk(struct tdp_iter *iter); u64 *tdp_iter_root_pt(struct tdp_iter *iter);
#endif /* __KVM_X86_MMU_TDP_ITER_H */ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 17976998bffb..a16559f31d94 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -105,7 +105,7 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa) }
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end, bool can_yield); + gfn_t start, gfn_t end, bool can_yield, bool flush);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) { @@ -118,7 +118,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
list_del(&root->link);
- zap_gfn_range(kvm, root, 0, max_gfn, false); + zap_gfn_range(kvm, root, 0, max_gfn, false, false);
free_page((unsigned long)root->spt); kmem_cache_free(mmu_page_header_cache, root); @@ -413,27 +413,43 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, _mmu->shadow_root_level, _start, _end)
/* - * Flush the TLB if the process should drop kvm->mmu_lock. - * Return whether the caller still needs to flush the tlb. + * Yield if the MMU lock is contended or this thread needs to return control + * to the scheduler. + * + * If this function should yield and flush is set, it will perform a remote + * TLB flush before yielding. + * + * If this function yields, it will also reset the tdp_iter's walk over the + * paging structure and the calling function should skip to the next + * iteration to allow the iterator to continue its traversal from the + * paging structure root. + * + * Return true if this function yielded and the iterator's traversal was reset. + * Return false if a yield was not needed. */ -static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter) +static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, + struct tdp_iter *iter, bool flush) { - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { - kvm_flush_remote_tlbs(kvm); - cond_resched_lock(&kvm->mmu_lock); - tdp_iter_refresh_walk(iter); + /* Ensure forward progress has been made before yielding. */ + if (iter->next_last_level_gfn == iter->yielded_gfn) return false; - } else { - return true; - } -}
-static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter) -{ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (flush) + kvm_flush_remote_tlbs(kvm); + cond_resched_lock(&kvm->mmu_lock); - tdp_iter_refresh_walk(iter); + + WARN_ON(iter->gfn > iter->next_last_level_gfn); + + tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], + iter->root_level, iter->min_level, + iter->next_last_level_gfn); + + return true; } + + return false; }
/* @@ -445,15 +461,22 @@ static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter) * scheduler needs the CPU or there is contention on the MMU lock. If this * function cannot yield, it will not release the MMU lock or reschedule and * the caller must ensure it does not supply too large a GFN range, or the - * operation can cause a soft lockup. + * operation can cause a soft lockup. Note, in some use cases a flush may be + * required by prior actions. Ensure the pending flush is performed prior to + * yielding. */ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, - gfn_t start, gfn_t end, bool can_yield) + gfn_t start, gfn_t end, bool can_yield, bool flush) { struct tdp_iter iter; - bool flush_needed = false;
tdp_root_for_each_pte(iter, root, start, end) { + if (can_yield && + tdp_mmu_iter_cond_resched(kvm, &iter, flush)) { + flush = false; + continue; + } + if (!is_shadow_present_pte(iter.old_spte)) continue;
@@ -468,13 +491,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, continue;
tdp_mmu_set_spte(kvm, &iter, 0); - - if (can_yield) - flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter); - else - flush_needed = true; + flush = true; } - return flush_needed; + + return flush; }
/* @@ -483,13 +503,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, * SPTEs have been cleared and a TLB flush is needed before releasing the * MMU lock. */ -bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end) +bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end, + bool can_yield) { struct kvm_mmu_page *root; bool flush = false;
for_each_tdp_mmu_root_yield_safe(kvm, root) - flush |= zap_gfn_range(kvm, root, start, end, true); + flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
return flush; } @@ -683,7 +704,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, unsigned long unused) { - return zap_gfn_range(kvm, root, start, end, false); + return zap_gfn_range(kvm, root, start, end, false, false); }
int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start, @@ -836,6 +857,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
for_each_tdp_pte_min_level(iter, root->spt, root->role.level, min_level, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) + continue; + if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) continue; @@ -844,8 +868,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter); } return spte_set; } @@ -889,6 +911,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, bool spte_set = false;
tdp_root_for_each_leaf_pte(iter, root, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) + continue; + if (spte_ad_need_write_protect(iter.old_spte)) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; @@ -903,8 +928,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter); } return spte_set; } @@ -1012,6 +1035,9 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, bool spte_set = false;
tdp_root_for_each_pte(iter, root, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) + continue; + if (!is_shadow_present_pte(iter.old_spte)) continue;
@@ -1019,8 +1045,6 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
tdp_mmu_set_spte(kvm, &iter, new_spte); spte_set = true; - - tdp_mmu_iter_cond_resched(kvm, &iter); }
return spte_set; @@ -1061,6 +1085,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm, bool spte_set = false;
tdp_root_for_each_pte(iter, root, start, end) { + if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) { + spte_set = false; + continue; + } + if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) continue; @@ -1073,7 +1102,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
tdp_mmu_set_spte(kvm, &iter, 0);
- spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter); + spte_set = true; }
if (spte_set) diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index cbbdbadd1526..a7a3f6db263d 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -12,7 +12,23 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root); hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu); void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end); +bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end, + bool can_yield); +static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, + gfn_t end) +{ + return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true); +} +static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level); + + /* + * Don't allow yielding, as the caller may have pending pages to zap + * on the shadow MMU. + */ + return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false); +} void kvm_tdp_mmu_zap_all(struct kvm *kvm);
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 768a6b4d2368..4e2d76b8b697 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -544,9 +544,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) return -ENODEV;
#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU) - /* If NMI wants to wake up CPU0, start CPU0. */ - if (wakeup_cpu0()) - start_cpu0(); + cond_wakeup_cpu0(); #endif }
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index e2cf3b29123e..37a5e5f8b221 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -292,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev)
static void deferred_probe_timeout_work_func(struct work_struct *work) { - struct device_private *private, *p; + struct device_private *p;
driver_deferred_probe_timeout = 0; driver_deferred_probe_trigger(); flush_work(&deferred_probe_work);
- list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe) - dev_info(private->device, "deferred probe pending\n"); + mutex_lock(&deferred_probe_mutex); + list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) + dev_info(p->device, "deferred probe pending\n"); + mutex_unlock(&deferred_probe_mutex); wake_up_all(&probe_timeout_waitqueue); } static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index a086dd34f932..4f501e4842ab 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -125,7 +125,7 @@ config AGP_HP_ZX1
config AGP_PARISC tristate "HP Quicksilver AGP support" - depends on AGP && PARISC && 64BIT + depends on AGP && PARISC && 64BIT && IOMMU_SBA help This option gives you AGP GART support for the HP Quicksilver AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 8c1d04db990d..571ae066e548 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -4336,20 +4336,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb) /* search the list of notifiers for this clk */ list_for_each_entry(cn, &clk_notifier_list, node) if (cn->clk == clk) - break; + goto found;
/* if clk wasn't in the notifier list, allocate new clk_notifier */ - if (cn->clk != clk) { - cn = kzalloc(sizeof(*cn), GFP_KERNEL); - if (!cn) - goto out; + cn = kzalloc(sizeof(*cn), GFP_KERNEL); + if (!cn) + goto out;
- cn->clk = clk; - srcu_init_notifier_head(&cn->notifier_head); + cn->clk = clk; + srcu_init_notifier_head(&cn->notifier_head);
- list_add(&cn->node, &clk_notifier_list); - } + list_add(&cn->node, &clk_notifier_list);
+found: ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
clk->core->notifier_count++; @@ -4374,32 +4373,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register); */ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) { - struct clk_notifier *cn = NULL; - int ret = -EINVAL; + struct clk_notifier *cn; + int ret = -ENOENT;
if (!clk || !nb) return -EINVAL;
clk_prepare_lock();
- list_for_each_entry(cn, &clk_notifier_list, node) - if (cn->clk == clk) - break; - - if (cn->clk == clk) { - ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); + list_for_each_entry(cn, &clk_notifier_list, node) { + if (cn->clk == clk) { + ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
- clk->core->notifier_count--; + clk->core->notifier_count--;
- /* XXX the notifier code should handle this better */ - if (!cn->notifier_head.head) { - srcu_cleanup_notifier_head(&cn->notifier_head); - list_del(&cn->node); - kfree(cn); + /* XXX the notifier code should handle this better */ + if (!cn->notifier_head.head) { + srcu_cleanup_notifier_head(&cn->notifier_head); + list_del(&cn->node); + kfree(cn); + } + break; } - - } else { - ret = -ENOENT; }
clk_prepare_unlock(); diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c index dbac5651ab85..9bcf2f8ed4de 100644 --- a/drivers/clk/qcom/camcc-sc7180.c +++ b/drivers/clk/qcom/camcc-sc7180.c @@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = { .name = "cam_cc_bps_clk_src", .parent_data = cam_cc_parent_data_2, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = { .name = "cam_cc_cci_0_clk_src", .parent_data = cam_cc_parent_data_5, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = { .name = "cam_cc_cci_1_clk_src", .parent_data = cam_cc_parent_data_5, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { .name = "cam_cc_cphy_rx_clk_src", .parent_data = cam_cc_parent_data_3, .num_parents = 6, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { .name = "cam_cc_csi0phytimer_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { .name = "cam_cc_csi1phytimer_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { .name = "cam_cc_csi2phytimer_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { .name = "cam_cc_csi3phytimer_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { .name = "cam_cc_fast_ahb_clk_src", .parent_data = cam_cc_parent_data_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = { .name = "cam_cc_icp_clk_src", .parent_data = cam_cc_parent_data_2, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = { .name = "cam_cc_ife_0_clk_src", .parent_data = cam_cc_parent_data_4, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = { .name = "cam_cc_ife_0_csid_clk_src", .parent_data = cam_cc_parent_data_3, .num_parents = 6, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = { .name = "cam_cc_ife_1_clk_src", .parent_data = cam_cc_parent_data_4, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = { .name = "cam_cc_ife_1_csid_clk_src", .parent_data = cam_cc_parent_data_3, .num_parents = 6, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = { .parent_data = cam_cc_parent_data_4, .num_parents = 4, .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { .name = "cam_cc_ife_lite_csid_clk_src", .parent_data = cam_cc_parent_data_3, .num_parents = 6, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = { .name = "cam_cc_ipe_0_clk_src", .parent_data = cam_cc_parent_data_2, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = { .name = "cam_cc_jpeg_clk_src", .parent_data = cam_cc_parent_data_2, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = { .name = "cam_cc_lrme_clk_src", .parent_data = cam_cc_parent_data_6, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = { .name = "cam_cc_mclk0_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = { .name = "cam_cc_mclk1_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = { .name = "cam_cc_mclk2_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = { .name = "cam_cc_mclk3_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = { .name = "cam_cc_mclk4_clk_src", .parent_data = cam_cc_parent_data_1, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = 4, .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 43ecd507bf83..cf94a12459ea 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; val &= GENMASK(socfpgaclk->width - 1, 0); /* Check for GPIO_DB_CLK by its offset */ - if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) + if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) div = val + 1; else div = (1 << val); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a4a47305574c..c85fdc78fcc9 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -365,22 +365,18 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc) * * Looks for device property "gpio-line-names" and if it exists assigns * GPIO line names for the chip. The memory allocated for the assigned - * names belong to the underlying software node and should not be released + * names belong to the underlying firmware node and should not be released * by the caller. */ static int devprop_gpiochip_set_names(struct gpio_chip *chip) { struct gpio_device *gdev = chip->gpiodev; - struct device *dev = chip->parent; + struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev); const char **names; int ret, i; int count;
- /* GPIO chip may not have a parent device whose properties we inspect. */ - if (!dev) - return 0; - - count = device_property_string_array_count(dev, "gpio-line-names"); + count = fwnode_property_string_array_count(fwnode, "gpio-line-names"); if (count < 0) return 0;
@@ -394,7 +390,7 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip) if (!names) return -ENOMEM;
- ret = device_property_read_string_array(dev, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", names, count); if (ret < 0) { dev_warn(&gdev->dev, "failed to read GPIO line names\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 4d8f19ab1014..8b87991a0470 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -907,7 +907,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
/* Allocate an SG array and squash pages into it */ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, - ttm->num_pages << PAGE_SHIFT, + (u64)ttm->num_pages << PAGE_SHIFT, GFP_KERNEL); if (r) goto release_sg; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index c9b143781105..c22956e8773d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (hwmgr->chip_id == CHIP_POLARIS10) || (hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12) || - (hwmgr->chip_id == CHIP_TONGA)) + (hwmgr->chip_id == CHIP_TONGA) || + (hwmgr->chip_id == CHIP_TOPAZ)) PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
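(For reference: the (u64) cast added in amdgpu_ttm.c above, and in radeon_ttm.c further down, keeps "num_pages << PAGE_SHIFT" from being truncated when the shift is evaluated in 32-bit arithmetic. A standalone sketch, assuming a 32-bit page count purely for illustration; the real width of ttm->num_pages depends on the kernel version.)

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	/* Hypothetical buffer of just over 4 GiB worth of pages. */
	uint32_t num_pages = 0x100001;

	/* Shift evaluated in 32-bit (unsigned int) arithmetic: the result
	 * wraps before it is widened, so the byte count collapses. */
	uint64_t wrapped = num_pages << PAGE_SHIFT;

	/* Widening the operand first, as the (u64) cast does, keeps it. */
	uint64_t full = (uint64_t)num_pages << PAGE_SHIFT;

	printf("wrapped: %llu bytes\n", (unsigned long long)wrapped);
	printf("full:    %llu bytes\n", (unsigned long long)full);
	return 0;
}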
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c index e21fb14d5e07..833d0c1be4f1 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle) return; }
+ if (!pkg->package.count) { + DRM_DEBUG_DRIVER("no connection in _DSM\n"); + return; + } + connector_count = &pkg->package.elements[0]; DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", (unsigned long long)connector_count->integer.value); for (i = 1; i < pkg->package.count; i++) { union acpi_object *obj = &pkg->package.elements[i]; - union acpi_object *connector_id = &obj->package.elements[0]; - union acpi_object *info = &obj->package.elements[1]; + union acpi_object *connector_id; + union acpi_object *info; + + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) { + DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i); + continue; + } + + connector_id = &obj->package.elements[0]; + info = &obj->package.elements[1]; + if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) { + DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i); + continue; + } + DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", (unsigned long long)connector_id->integer.value); DRM_DEBUG_DRIVER(" port id: %s\n", diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index e7a8442b59af..a676811ef69d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -566,17 +566,17 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, } else { /* * a650 tier targets don't need whereami but still need to be - * equal to or newer than 1.95 for other security fixes + * equal to or newer than 0.95 for other security fixes */ if (adreno_is_a650(adreno_gpu)) { - if ((buf[0] & 0xfff) >= 0x195) { + if ((buf[0] & 0xfff) >= 0x095) { ret = true; goto out; }
DRM_DEV_ERROR(&gpu->pdev->dev, "a650 SQE ucode is too old. Have version %x need at least %x\n", - buf[0] & 0xfff, 0x195); + buf[0] & 0xfff, 0x095); }
/* diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 8981cfa9dbc3..92e6f1b94738 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_TOP, mode_sel); DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); - DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0)); + if (cfg->merge_3d) + DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, + BIT(cfg->merge_3d - MERGE_3D_0)); }
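(For reference: the new "if (cfg->merge_3d)" guard above presumably matters because an index of 0 means "no 3D merge block", and BIT(cfg->merge_3d - MERGE_3D_0) would then shift by a negative amount and write garbage to the register. A toy illustration with made-up enum names, not the driver's:)

#include <stdio.h>

/* 0 stands for "no block selected"; real blocks start at 1. */
enum { MUX_NONE = 0, MUX_A = 1, MUX_B = 2 };

static unsigned int active_bits(int idx)
{
	if (!idx)		/* the guard the patch adds */
		return 0;
	return 1u << (idx - MUX_A);	/* negative shift without the guard */
}

int main(void)
{
	printf("none: 0x%x\n", active_bits(MUX_NONE));
	printf("A:    0x%x\n", active_bits(MUX_A));
	printf("B:    0x%x\n", active_bits(MUX_B));
	return 0;
}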
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index a5c6b8c23336..196907689c82 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -570,6 +570,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) kfree(priv); err_put_drm_dev: drm_dev_put(ddev); + platform_set_drvdata(pdev, NULL); return ret; }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 23195d5d4e91..176cb55062be 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -365,7 +365,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt * if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) { /* check that we only pin down anonymous memory to prevent problems with writeback */ - unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; + unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE; struct vm_area_struct *vma; vma = find_vma(gtt->usermm, gtt->userptr); if (!vma || vma->vm_file || vma->vm_end < end) @@ -387,7 +387,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt * } while (pinned < ttm->num_pages);
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, - ttm->num_pages << PAGE_SHIFT, + (u64)ttm->num_pages << PAGE_SHIFT, GFP_KERNEL); if (r) goto release_sg; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index ea710beb8e00..351c601f0ddb 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) { const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc); const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc); + struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev); u32 fifo_len_bytes = pv_data->fifo_depth;
/* @@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) if (crtc_data->hvs_output == 5) return 32;
+ /* + * It looks like in some situations, we will overflow + * the PixelValve FIFO (with the bit 10 of PV stat being + * set) and stall the HVS / PV, eventually resulting in + * a page flip timeout. + * + * Displaying the video overlay during a playback with + * Kodi on an RPi3 seems to be a great solution with a + * failure rate around 50%. + * + * Removing 1 from the FIFO full level however + * seems to completely remove that issue. + */ + if (!vc4->hvs->hvs5) + return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1; + return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; } } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index d6425ad6e6a3..2871cf2ee8b4 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK) != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) { dev_err(dev->dev, "High Speed not supported!\n"); + t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; dev->master_cfg &= ~DW_IC_CON_SPEED_MASK; dev->master_cfg |= DW_IC_CON_SPEED_FAST; dev->hs_hcnt = 0; diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index cb4a25ebb890..2a946c207928 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -526,8 +526,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id) i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA); data = *i2c->wbuf; data &= ~JZ4780_I2C_DC_READ; - if ((!i2c->stop_hold) && (i2c->cdata->version >= - ID_X1000)) + if ((i2c->wt_len == 1) && (!i2c->stop_hold) && + (i2c->cdata->version >= ID_X1000)) data |= X1000_I2C_DC_STOP; jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data); i2c->wbuf++; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 63ebf722a424..f21362355973 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap) static int i2c_init_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; - char *err_str; + char *err_str, *err_level = KERN_ERR;
if (!bri) return 0; @@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap) return -EPROBE_DEFER;
if (!bri->recover_bus) { - err_str = "no recover_bus() found"; + err_str = "no suitable method provided"; + err_level = KERN_DEBUG; goto err; }
@@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
return 0; err: - dev_err(&adap->dev, "Not using recovery: %s\n", err_str); + dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str); adap->bus_recovery_info = NULL;
return -EINVAL; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 0abce004a959..65e3e7df8a4b 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = { [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, - .len = sizeof(struct rdma_nla_ls_gid)}, + .len = sizeof(struct rdma_nla_ls_gid), + .validation_type = NLA_VALIDATE_MIN, + .min = sizeof(struct rdma_nla_ls_gid)}, };
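(For reference: the stricter netlink policy above makes the core reject a DGID attribute shorter than struct rdma_nla_ls_gid before any handler copies from it. A minimal userspace sketch of the same length check; the 16-byte GID stand-in and names are invented for illustration.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ls_gid { uint8_t raw[16]; };	/* toy stand-in for the real GID */

/* Reject a truncated attribute before copying a full GID out of it. */
static int copy_gid(struct ls_gid *out, const void *payload, size_t len)
{
	if (len < sizeof(*out))
		return -1;
	memcpy(out, payload, sizeof(*out));
	return 0;
}

int main(void)
{
	struct ls_gid gid;
	uint8_t short_attr[4] = { 0 };
	uint8_t full_attr[16] = { 0 };

	printf("short: %d\n", copy_gid(&gid, short_attr, sizeof(short_attr)));
	printf("full:  %d\n", copy_gid(&gid, full_attr, sizeof(full_attr)));
	return 0;
}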
static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 81903749d241..e42c812e74c3 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) c4iw_init_wr_wait(ep->com.wr_waitp); err = cxgb4_remove_server( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], true); + ep->com.dev->rdev.lldi.rxq_ids[0], + ep->com.local_addr.ss_family == AF_INET6); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 2a91b8d95e12..04b1e8f021f6 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd, */ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) { - int node = pcibus_to_node(dd->pcidev->bus); struct hfi1_affinity_node *entry; const struct cpumask *local_mask; int curr_cpu, possible, i, ret; bool new_entry = false;
- /* - * If the BIOS does not have the NUMA node information set, select - * NUMA 0 so we get consistent performance. - */ - if (node < 0) { - dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); - node = 0; - } - dd->node = node; - local_mask = cpumask_of_node(dd->node); if (cpumask_first(local_mask) >= nr_cpu_ids) local_mask = topology_core_cpumask(0); @@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) * create an entry in the global affinity structure and initialize it. */ if (!entry) { - entry = node_affinity_allocate(node); + entry = node_affinity_allocate(dd->node); if (!entry) { dd_dev_err(dd, "Unable to allocate global affinity node\n"); @@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd) if (new_entry) node_affinity_add_tail(entry);
+ dd->affinity_entry = entry; mutex_unlock(&node_affinity.lock);
return 0; @@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd) { struct hfi1_affinity_node *entry;
- if (dd->node < 0) - return; - mutex_lock(&node_affinity.lock); + if (!dd->affinity_entry) + goto unlock; entry = node_affinity_lookup(dd->node); if (!entry) goto unlock; @@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd) */ _dev_comp_vect_cpu_mask_clean_up(dd, entry); unlock: + dd->affinity_entry = NULL; mutex_unlock(&node_affinity.lock); - dd->node = NUMA_NO_NODE; }
/* diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index e09e8244a94c..2a9a040569eb 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1409,6 +1409,7 @@ struct hfi1_devdata { spinlock_t irq_src_lock; int vnic_num_vports; struct net_device *dummy_netdev; + struct hfi1_affinity_node *affinity_entry;
/* Keeps track of IPoIB RSM rule users */ atomic_t ipoib_rsm_usr_num; diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index cb7ad1288821..786c6316273f 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, dd->pport = (struct hfi1_pportdata *)(dd + 1); dd->pcidev = pdev; pci_set_drvdata(pdev, dd); - dd->node = NUMA_NO_NODE;
ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b, GFP_KERNEL); @@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, goto bail; } rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); + /* + * If the BIOS does not have the NUMA node information set, select + * NUMA 0 so we get consistent performance. + */ + dd->node = pcibus_to_node(pdev->bus); + if (dd->node == NUMA_NO_NODE) { + dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); + dd->node = 0; + }
/* * Initialize all locks for the device. This needs to be as early as diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c index 6d263c9749b3..ea95baada2b6 100644 --- a/drivers/infiniband/hw/hfi1/netdev_rx.c +++ b/drivers/infiniband/hw/hfi1/netdev_rx.c @@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts, return 0; }
- cpumask_and(node_cpu_mask, cpu_mask, - cpumask_of_node(pcibus_to_node(dd->pcidev->bus))); + cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
available_cpus = cpumask_weight(node_cpu_mask);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 0eb6a7a618e0..9ea542270ed4 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, * TGT QP isn't associated with RQ/SQ */ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) && - (attrs->qp_type != IB_QPT_XRC_TGT)) { + (attrs->qp_type != IB_QPT_XRC_TGT) && + (attrs->qp_type != IB_QPT_XRC_INI)) { struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq); struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 394c1f6822b9..ee37c5af3a8c 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -2735,8 +2735,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
/* Now it is safe to iterate over all paths without locks */ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { - rtrs_clt_destroy_sess_files(sess, NULL); rtrs_clt_close_conns(sess, true); + rtrs_clt_destroy_sess_files(sess, NULL); kobject_put(&sess->kobj); } free_clt(clt); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 25859d16d06f..e7be36dc2159 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len) return ret; }
+static int mcp251x_spi_write(struct spi_device *spi, int len) +{ + struct mcp251x_priv *priv = spi_get_drvdata(spi); + int ret; + + ret = spi_write(spi, priv->spi_tx_buf, len); + if (ret) + dev_err(&spi->dev, "spi write failed: ret = %d\n", ret); + + return ret; +} + static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg) { struct mcp251x_priv *priv = spi_get_drvdata(spi); @@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val) priv->spi_tx_buf[1] = reg; priv->spi_tx_buf[2] = val;
- mcp251x_spi_trans(spi, 3); + mcp251x_spi_write(spi, 3); }
static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2) @@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2) priv->spi_tx_buf[2] = v1; priv->spi_tx_buf[3] = v2;
- mcp251x_spi_trans(spi, 4); + mcp251x_spi_write(spi, 4); }
static void mcp251x_write_bits(struct spi_device *spi, u8 reg, @@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg, priv->spi_tx_buf[2] = mask; priv->spi_tx_buf[3] = val;
- mcp251x_spi_trans(spi, 4); + mcp251x_spi_write(spi, 4); }
static u8 mcp251x_read_stat(struct spi_device *spi) @@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, buf[i]); } else { memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len); - mcp251x_spi_trans(spi, TXBDAT_OFF + len); + mcp251x_spi_write(spi, TXBDAT_OFF + len); } }
@@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
/* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); - mcp251x_spi_trans(priv->spi, 1); + mcp251x_spi_write(priv->spi, 1); }
static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, @@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi) mdelay(MCP251X_OST_DELAY_MS);
priv->spi_tx_buf[0] = INSTRUCTION_RESET; - ret = mcp251x_spi_trans(spi, 1); + ret = mcp251x_spi_write(spi, 1); if (ret) return ret;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 251835ea15aa..18c7d8c151a4 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -857,7 +857,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) - goto lbl_unregister_candev; + goto adap_dev_free; }
/* get device number early */ @@ -869,6 +869,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
return 0;
+adap_dev_free: + if (dev->adapter->dev_free) + dev->adapter->dev_free(dev); + lbl_unregister_candev: unregister_candev(netdev);
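(For reference: the peak_usb fix above adds a new unwind label so a failure in dev_set_bus() frees the adapter-private data before falling through to the existing candev unregistration. A minimal userspace sketch of the same goto-ladder ordering; the allocations are placeholders, not the driver's objects.)

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_after_b)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto err;

	b = malloc(16);
	if (!b)
		goto free_a;

	if (fail_after_b)
		goto free_b;	/* new failure point: unwind b, then a */

	free(b);		/* a real driver would keep a and b here */
	free(a);
	return 0;

free_b:
	free(b);
free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	printf("ok:   %d\n", setup(0));
	printf("fail: %d\n", setup(1));
	return 0;
}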
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 662e68a0e7e6..93c7fa1fd4cb 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -93,8 +93,12 @@
/* GSWIP MII Registers */ #define GSWIP_MII_CFGp(p) (0x2 * (p)) +#define GSWIP_MII_CFG_RESET BIT(15) #define GSWIP_MII_CFG_EN BIT(14) +#define GSWIP_MII_CFG_ISOLATE BIT(13) #define GSWIP_MII_CFG_LDCLKDIS BIT(12) +#define GSWIP_MII_CFG_RGMII_IBS BIT(8) +#define GSWIP_MII_CFG_RMII_CLK BIT(7) #define GSWIP_MII_CFG_MODE_MIIP 0x0 #define GSWIP_MII_CFG_MODE_MIIM 0x1 #define GSWIP_MII_CFG_MODE_RMIIP 0x2 @@ -190,6 +194,23 @@ #define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
#define GSWIP_MAC_FLEN 0x8C5 +#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC)) +#define GSWIP_MAC_CTRL_0_PADEN BIT(8) +#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7) +#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070 +#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010 +#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020 +#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030 +#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040 +#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C +#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004 +#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C +#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003 +#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000 +#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001 +#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002 #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
@@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port, GSWIP_SDMA_PCTRLp(port));
if (!dsa_is_cpu_port(ds, port)) { - u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO | - GSWIP_MDIO_PHY_SPEED_AUTO | - GSWIP_MDIO_PHY_FDUP_AUTO | - GSWIP_MDIO_PHY_FCONTX_AUTO | - GSWIP_MDIO_PHY_FCONRX_AUTO | - (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK); - - gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port)); - /* Activate MDIO auto polling */ - gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0); + u32 mdio_phy = 0; + + if (phydev) + mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); }
return 0; @@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port) if (!dsa_is_user_port(ds, port)) return;
- if (!dsa_is_cpu_port(ds, port)) { - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN, - GSWIP_MDIO_PHY_LINK_MASK, - GSWIP_MDIO_PHYp(port)); - /* Deactivate MDIO auto polling */ - gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0); - } - gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, GSWIP_FDMA_PCTRLp(port)); gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, @@ -806,14 +816,32 @@ static int gswip_setup(struct dsa_switch *ds) gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2); gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
- /* disable PHY auto polling */ + /* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have an + * interoperability problem with this auto polling mechanism because + * their status registers think that the link is in a different state + * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set + * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the + * auto polling state machine consider the link to be negotiated at + * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads + * to the switch port being completely dead (RX and TX are both not + * working). + * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F + * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes + * it would work fine for a few minutes to hours and then stop; on + * other devices no traffic could be sent or received at all. + * Testing shows that when PHY auto polling is disabled these problems + * go away. + */ gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0); + /* Configure the MDIO clock to 2.5 MHz */ gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
- /* Disable the xMII link */ + /* Disable the xMII interface and clear its isolation bit */ for (i = 0; i < priv->hw_info->max_ports; i++) - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i); + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE, + 0, i);
/* enable special tag insertion on cpu port */ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, @@ -1464,6 +1492,112 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, return; }
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) +{ + u32 mdio_phy; + + if (link) + mdio_phy = GSWIP_MDIO_PHY_LINK_UP; + else + mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN; + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); +} + +static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed, + phy_interface_t interface) +{ + u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0; + + switch (speed) { + case SPEED_10: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M10; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M2P5; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_100: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M100; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M25; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_1000: + mdio_phy = GSWIP_MDIO_PHY_SPEED_G1; + + mii_cfg = GSWIP_MII_CFG_RATE_M125; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII; + break; + } + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port); + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0, + GSWIP_MAC_CTRL_0p(port)); +} + +static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex) +{ + u32 mac_ctrl_0, mdio_phy; + + if (duplex == DUPLEX_FULL) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN; + mdio_phy = GSWIP_MDIO_PHY_FDUP_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS; + mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS; + } + + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0, + GSWIP_MAC_CTRL_0p(port)); + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); +} + +static void gswip_port_set_pause(struct gswip_priv *priv, int port, + bool tx_pause, bool rx_pause) +{ + u32 mac_ctrl_0, mdio_phy; + + if (tx_pause && rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_EN; + } else if (tx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_DIS; + } else if (rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_DIS; + } + + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK, + mac_ctrl_0, GSWIP_MAC_CTRL_0p(port)); + gswip_mdio_mask(priv, + GSWIP_MDIO_PHY_FCONTX_MASK | + GSWIP_MDIO_PHY_FCONRX_MASK, + mdio_phy, GSWIP_MDIO_PHYp(port)); +} + static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) @@ -1483,6 +1617,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, break; case PHY_INTERFACE_MODE_RMII: miicfg |= GSWIP_MII_CFG_MODE_RMIIM; + + /* Configure the RMII clock as output: */ + miicfg |= GSWIP_MII_CFG_RMII_CLK; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: @@ -1495,7 +1632,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, "Unsupported interface: %d\n", state->interface); return; } - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port); + + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK | + GSWIP_MII_CFG_RGMII_IBS | 
GSWIP_MII_CFG_LDCLKDIS, + miicfg, port);
switch (state->interface) { case PHY_INTERFACE_MODE_RGMII_ID: @@ -1520,6 +1661,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port, struct gswip_priv *priv = ds->priv;
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); + + if (!dsa_is_cpu_port(ds, port)) + gswip_port_set_link(priv, port, false); }
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, @@ -1531,6 +1675,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, { struct gswip_priv *priv = ds->priv;
+ if (!dsa_is_cpu_port(ds, port)) { + gswip_port_set_link(priv, port, true); + gswip_port_set_speed(priv, port, speed, interface); + gswip_port_set_duplex(priv, port, duplex); + gswip_port_set_pause(priv, port, tx_pause, rx_pause); + } + gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port); }
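(For reference: gswip_mdio_mask(), gswip_mii_mask_cfg() and gswip_switch_mask() used throughout the hunks above all follow the usual clear-then-set register update. A tiny standalone sketch; the mask and value happen to match the GSWIP_MAC_CTRL_0_FDUP_* defines added earlier, but are used here only as numbers.)

#include <stdint.h>
#include <stdio.h>

/* Clear the bits in 'clear', then OR in 'set'. */
static uint32_t field_update(uint32_t reg, uint32_t clear, uint32_t set)
{
	return (reg & ~clear) | set;
}

int main(void)
{
	uint32_t mac_ctrl = 0x003f;

	/* force full duplex: mask 0x000C, enable value 0x0004 */
	mac_ctrl = field_update(mac_ctrl, 0x000C, 0x0004);
	printf("0x%04x\n", mac_ctrl);	/* 0x0037 */
	return 0;
}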
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ba8321ec1ee7..3305979a9f7c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -180,9 +180,9 @@ #define XGBE_DMA_SYS_AWCR 0x30303030
/* DMA cache settings - PCI device */ -#define XGBE_DMA_PCI_ARCR 0x00000003 -#define XGBE_DMA_PCI_AWCR 0x13131313 -#define XGBE_DMA_PCI_AWARCR 0x00000313 +#define XGBE_DMA_PCI_ARCR 0x000f0f0f +#define XGBE_DMA_PCI_AWCR 0x0f0f0f0f +#define XGBE_DMA_PCI_AWARCR 0x00000f0f
/* DMA channel interrupt modes */ #define XGBE_IRQ_MODE_EDGE 0 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 07cdb38e7d11..fbedbceef2d1 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3235,6 +3235,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) bool cmp_b = false; bool cmp_c = false;
+ if (!macb_is_gem(bp)) + return; + tp4sp_v = &(fs->h_u.tcp_ip4_spec); tp4sp_m = &(fs->m_u.tcp_ip4_spec);
@@ -3603,6 +3606,7 @@ static void macb_restore_features(struct macb *bp) { struct net_device *netdev = bp->dev; netdev_features_t features = netdev->features; + struct ethtool_rx_fs_item *item;
/* TX checksum offload */ macb_set_txcsum_feature(bp, features); @@ -3611,6 +3615,9 @@ static void macb_restore_features(struct macb *bp) macb_set_rxcsum_feature(bp, features);
/* RX Flow Filters */ + list_for_each_entry(item, &bp->rx_fs_list.list, list) + gem_prog_cmp_regs(bp, &item->fs); + macb_set_rxflow_feature(bp, features); }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 75474f810249..c5b0e725b238 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer temp_buff = { 0 }; struct sge_qbase_reg_field *sge_qbase; struct ireg_buf *ch_sge_dbg; + u8 padap_running = 0; int i, rc; + u32 size;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, - sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase), - &temp_buff); + /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can + * lead to SGE missing doorbells under heavy traffic. So, only + * collect them when adapter is idle. + */ + for_each_port(padap, i) { + padap_running = netif_running(padap->port[i]); + if (padap_running) + break; + } + + size = sizeof(*ch_sge_dbg) * 2; + if (!padap_running) + size += sizeof(*sge_qbase); + + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff); if (rc) return rc;
@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, ch_sge_dbg++; }
- if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 && + !padap_running) { sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg; /* 1 addr reg SGE_QBASE_INDEX and 4 data reg * SGE_QBASE_MAP[0-3] diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 98d01a7497ec..581670dced6e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, - 0x11fc, 0x1274, + 0x11fc, 0x123c, + 0x1254, 0x1274, 0x1280, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4fab2ee5bbf5..e4d9c4c640e5 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -364,7 +364,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
static int gfar_set_mac_addr(struct net_device *dev, void *p) { - eth_mac_addr(dev, p); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret;
gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
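(For reference: eth_mac_addr() can refuse an address, e.g. a multicast one, so the gianfar fix above stops programming the hardware when it does. A toy userspace version of "validate first, then write"; the validation and register write are stand-ins, not the driver's code.)

#include <stdint.h>
#include <stdio.h>

static int mac_addr_valid(const uint8_t *mac)
{
	int i, nonzero = 0;

	if (mac[0] & 1)			/* multicast bit: not a unicast MAC */
		return 0;
	for (i = 0; i < 6; i++)
		nonzero |= mac[i];
	return nonzero != 0;
}

static int set_mac(const uint8_t *mac)
{
	if (!mac_addr_valid(mac))
		return -1;		/* propagate the error, skip the write */
	/* ... program the MAC registers here ... */
	return 0;
}

int main(void)
{
	uint8_t bad[6]  = { 0x01, 0, 0, 0, 0, 0 };
	uint8_t good[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("bad:  %d\n", set_mac(bad));
	printf("good: %d\n", set_mac(good));
	return 0;
}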
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 674b3a22e91f..3bd7bc794677 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2575,14 +2575,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
hclgevf_update_link_mode(hdev);
- clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); - return 0; }
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 118473dfdcbd..fe1258778cbc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -142,6 +142,7 @@ enum i40e_state_t { __I40E_VIRTCHNL_OP_PENDING, __I40E_RECOVERY_MODE, __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */ + __I40E_VFS_RELEASING, /* This must be last as it determines the size of the BITMAP */ __I40E_STATE_SIZE__, }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d7c13ca9be7d..d627b59ad446 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, case RING_TYPE_XDP: ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL); break; + default: + ring = NULL; + break; } if (!ring) return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9e81f85ee2d8..31d48a85cfaf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], I40E_STAT(struct i40e_vsi, _name, _stat) #define I40E_VEB_STAT(_name, _stat) \ I40E_STAT(struct i40e_veb, _name, _stat) +#define I40E_VEB_TC_STAT(_name, _stat) \ + I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat) #define I40E_PFC_STAT(_name, _stat) \ I40E_STAT(struct i40e_pfc_stats, _name, _stat) #define I40E_QUEUE_STAT(_name, _stat) \ @@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = { I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol), };
+struct i40e_cp_veb_tc_stats { + u64 tc_rx_packets; + u64 tc_rx_bytes; + u64 tc_tx_packets; + u64 tc_tx_bytes; +}; + static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = { - I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets), - I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes), - I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets), - I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes), + I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets), + I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes), + I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets), + I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes), };
static const struct i40e_stats i40e_gstrings_misc_stats[] = { @@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
/* Set flow control settings */ ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
switch (hw->fc.requested_mode) { case I40E_FC_FULL: @@ -2216,6 +2226,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } }
+/** + * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure + * @tc: the TC statistics in VEB structure (veb->tc_stats) + * @i: the index of traffic class in (veb->tc_stats) structure to copy + * + * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to + * one dimensional structure i40e_cp_veb_tc_stats. + * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC + * statistics for the given TC. + **/ +static struct i40e_cp_veb_tc_stats +i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i) +{ + struct i40e_cp_veb_tc_stats veb_tc = { + .tc_rx_packets = tc->tc_rx_packets[i], + .tc_rx_bytes = tc->tc_rx_bytes[i], + .tc_tx_packets = tc->tc_tx_packets[i], + .tc_tx_bytes = tc->tc_tx_bytes[i], + }; + + return veb_tc; +} + /** * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure * @pf: the PF device structure @@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_gstrings_veb_stats);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) - i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL, - i40e_gstrings_veb_tc_stats); + if (veb_stats) { + struct i40e_cp_veb_tc_stats veb_tc = + i40e_get_veb_tc_stats(&veb->tc_stats, i); + + i40e_add_ethtool_stats(&data, &veb_tc, + i40e_gstrings_veb_tc_stats); + } else { + i40e_add_ethtool_stats(&data, NULL, + i40e_gstrings_veb_tc_stats); + }
i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
@@ -5244,7 +5285,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, - true, addr, offset, &value, NULL); + addr, true, offset, &value, NULL); if (status) return -EIO; data[i] = value; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4a2d03cada01..7fab60128c76 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { - dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n", - vsi->netdev->name, + dev_info(&pf->pdev->dev, "%s allmulti mode.\n", cur_multipromisc ? "entering" : "leaving"); } } @@ -14647,12 +14646,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) * in order to register the netdev */ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN); - if (v_idx < 0) + if (v_idx < 0) { + err = v_idx; goto err_switch_setup; + } pf->lan_vsi = v_idx; vsi = pf->vsi[v_idx]; - if (!vsi) + if (!vsi) { + err = -EFAULT; goto err_switch_setup; + } vsi->alloc_queue_pairs = 1; err = i40e_config_netdev(vsi); if (err) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 903d4e8cb0a1..92ce835bc79e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2198,8 +2198,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) * @rx_ring: Rx ring being processed * @xdp: XDP buffer containing the frame **/ -static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, - struct xdp_buff *xdp) +static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; @@ -2238,7 +2237,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, } xdp_out: rcu_read_unlock(); - return ERR_PTR(-result); + return result; }
/** @@ -2350,6 +2349,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) unsigned int xdp_xmit = 0; bool failure = false; struct xdp_buff xdp; + int xdp_res = 0;
#if (PAGE_SIZE < 8192) xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0); @@ -2416,12 +2416,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size); #endif - skb = i40e_run_xdp(rx_ring, &xdp); + xdp_res = i40e_run_xdp(rx_ring, &xdp); }
- if (IS_ERR(skb)) { - unsigned int xdp_res = -PTR_ERR(skb); - + if (xdp_res) { if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { xdp_xmit |= xdp_res; i40e_rx_buffer_flip(rx_ring, rx_buffer, size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1b6ec9be155a..5d301a466f5c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) **/ static inline void i40e_vc_disable_vf(struct i40e_vf *vf) { + struct i40e_pf *pf = vf->pf; int i;
i40e_vc_notify_vf_reset(vf); @@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf) * ensure a reset. */ for (i = 0; i < 20; i++) { + /* If the PF is in the VFs-releasing state, resetting the VF is + * impossible, so leave it. + */ + if (test_bit(__I40E_VFS_RELEASING, pf->state)) + return; if (i40e_reset_vf(vf, false)) return; usleep_range(10000, 20000); @@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf) return; + + set_bit(__I40E_VFS_RELEASING, pf->state); while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) usleep_range(1000, 2000);
@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf) } } clear_bit(__I40E_VF_DISABLE, pf->state); + clear_bit(__I40E_VFS_RELEASING, pf->state); }
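(For reference: the new __I40E_VFS_RELEASING bit lets the disable path give up on its reset retries as soon as the free path announces that the VFs are going away. A toy model of that handshake using a C11 atomic flag; the retry count mirrors the driver's 20 attempts, everything else is invented.)

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool vfs_releasing;

static bool try_reset_vf(void)
{
	return false;			/* pretend the reset keeps failing */
}

static void disable_vf(void)
{
	int i;

	for (i = 0; i < 20; i++) {
		if (atomic_load(&vfs_releasing)) {
			printf("VFs are being released, skip the reset\n");
			return;
		}
		if (try_reset_vf())
			return;
	}
	printf("gave up after 20 attempts\n");
}

int main(void)
{
	disable_vf();				/* normal path */
	atomic_store(&vfs_releasing, true);
	disable_vf();				/* release in progress */
	return 0;
}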
#ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 37a21fb99922..7949f6b79f92 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -462,7 +462,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget); if (!nb_pkts) - return false; + return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { nb_processed = xdp_ring->count - xdp_ring->next_to_use; @@ -479,7 +479,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
- return true; + return nb_pkts < budget; }
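(For reference: the i40e_xsk.c change above makes the zero-copy transmit path report completion only when it consumed less than its budget, matching the usual "true means no more work" convention of the cleanup callers. A toy model of that convention; names and the caller logic are invented for illustration.)

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the round finished within budget (nothing left),
 * false when the budget was exhausted and the caller should poll again. */
static bool xmit_round(unsigned int ready, unsigned int budget,
		       unsigned int *sent)
{
	unsigned int n = ready < budget ? ready : budget;

	*sent = n;		/* pretend n descriptors were queued */
	return n < budget;
}

int main(void)
{
	unsigned int sent;

	printf("idle ring:   done=%d\n", xmit_round(0, 64, &sent));
	printf("busy ring:   done=%d\n", xmit_round(200, 64, &sent));
	printf("light load:  done=%d\n", xmit_round(10, 64, &sent));
	return 0;
}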
/** diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 619d93f8b54c..f3d927320a70 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -194,7 +194,6 @@ enum ice_state { __ICE_NEEDS_RESTART, __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ - __ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */ __ICE_PFR_REQ, /* set by driver and peers */ __ICE_CORER_REQ, /* set by driver and peers */ __ICE_GLOBR_REQ, /* set by driver and peers */ @@ -586,7 +585,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); -bool ice_is_wol_supported(struct ice_pf *pf); +bool ice_is_wol_supported(struct ice_hw *hw); int ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, bool is_tun); @@ -604,6 +603,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf); int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, struct ice_rq_event_info *event); int ice_open(struct net_device *netdev); +int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 6d7e7dd0ebe2..836e96159a09 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
if (!data) { data = devm_kcalloc(ice_hw_to_dev(hw), - sizeof(*data), ICE_AQC_FW_LOG_ID_MAX, + sizeof(*data), GFP_KERNEL); if (!data) return ICE_ERR_NO_MEMORY; diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index faaa08e8171b..68866f4f0eb0 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -31,8 +31,8 @@ enum ice_ctl_q { ICE_CTL_Q_MAILBOX, };
-/* Control Queue timeout settings - max delay 250ms */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */ +/* Control Queue timeout settings - max delay 1s */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */ #define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 2a3147ee0bbb..211ac6f907ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, /** * ice_cee_to_dcb_cfg * @cee_cfg: pointer to CEE configuration struct - * @dcbcfg: DCB configuration struct + * @pi: port information structure * * Convert CEE configuration from firmware to DCB configuration */ static void ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, - struct ice_dcbx_cfg *dcbcfg) + struct ice_port_info *pi) { u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift; + u8 i, j, err, sync, oper, app_index, ice_app_sel_type; u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); - u8 i, err, sync, oper, app_index, ice_app_sel_type; u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift; + struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg; u16 ice_app_prot_id_type;
- /* CEE PG data to ETS config */ + dcbcfg = &pi->qos_cfg.local_dcbx_cfg; + dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE; + dcbcfg->tlv_status = tlv_status; + + /* CEE PG data */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
/* Note that the FW creates the oper_prio_tc nibbles reversed @@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, } }
- /* CEE PFC data to ETS config */ + /* CEE PFC data */ dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+ /* CEE APP TLV data */ + if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) + cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg; + else + cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg; + app_index = 0; for (i = 0; i < 3; i++) { if (i == 0) { @@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S; ice_app_sel_type = ICE_APP_SEL_TCPIP; ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI; + + for (j = 0; j < cmp_dcbcfg->numapps; j++) { + u16 prot_id = cmp_dcbcfg->app[j].prot_id; + u8 sel = cmp_dcbcfg->app[j].selector; + + if (sel == ICE_APP_SEL_TCPIP && + (prot_id == ICE_APP_PROT_ID_ISCSI || + prot_id == ICE_APP_PROT_ID_ISCSI_860)) { + ice_app_prot_id_type = prot_id; + break; + } + } } else { /* FIP APP */ ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M; @@ -850,9 +873,9 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) return ICE_ERR_PARAM;
if (dcbx_mode == ICE_DCBX_MODE_IEEE) - dcbx_cfg = &pi->local_dcbx_cfg; + dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; else if (dcbx_mode == ICE_DCBX_MODE_CEE) - dcbx_cfg = &pi->desired_dcbx_cfg; + dcbx_cfg = &pi->qos_cfg.desired_dcbx_cfg;
/* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE @@ -863,7 +886,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) goto out;
/* Get Remote DCB Config */ - dcbx_cfg = &pi->remote_dcbx_cfg; + dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg); /* Don't treat ENOENT as an error for Remote MIBs */ @@ -892,14 +915,11 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); if (!ret) { /* CEE mode */ - dcbx_cfg = &pi->local_dcbx_cfg; - dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE; - dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status); - ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg); ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE); + ice_cee_to_dcb_cfg(&cee_cfg, pi); } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) { /* CEE mode not enabled try querying IEEE data */ - dcbx_cfg = &pi->local_dcbx_cfg; + dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE); } @@ -916,26 +936,26 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) */ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { - struct ice_port_info *pi = hw->port_info; + struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; enum ice_status ret = 0;
if (!hw->func_caps.common_cap.dcb) return ICE_ERR_NOT_SUPPORTED;
- pi->is_sw_lldp = true; + qos_cfg->is_sw_lldp = true;
/* Get DCBX status */ - pi->dcbx_status = ice_get_dcbx_status(hw); + qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
- if (pi->dcbx_status == ICE_DCBX_STATUS_DONE || - pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS || - pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { + if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DONE || + qos_cfg->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS || + qos_cfg->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { /* Get current DCBX configuration */ - ret = ice_get_dcb_cfg(pi); + ret = ice_get_dcb_cfg(hw->port_info); if (ret) return ret; - pi->is_sw_lldp = false; - } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) { + qos_cfg->is_sw_lldp = false; + } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) { return ICE_ERR_NOT_READY; }
@@ -943,7 +963,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) if (enable_mib_change) { ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); if (ret) - pi->is_sw_lldp = true; + qos_cfg->is_sw_lldp = true; }
return ret; @@ -958,21 +978,21 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) */ enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) { - struct ice_port_info *pi = hw->port_info; + struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; enum ice_status ret;
if (!hw->func_caps.common_cap.dcb) return ICE_ERR_NOT_SUPPORTED;
/* Get DCBX status */ - pi->dcbx_status = ice_get_dcbx_status(hw); + qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
- if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) + if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) return ICE_ERR_NOT_READY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) - pi->is_sw_lldp = !ena_mib; + qos_cfg->is_sw_lldp = !ena_mib;
return ret; } @@ -1270,7 +1290,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) hw = pi->hw;
/* update the HW local config */ - dcbcfg = &pi->local_dcbx_cfg; + dcbcfg = &pi->qos_cfg.local_dcbx_cfg; /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 36abd6b7280c..1e8f71ffc8ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -28,7 +28,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) return;
- dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
ice_for_each_traffic_class(i) if (vsi->tc_cfg.ena_tc & BIT(i)) @@ -134,7 +134,7 @@ static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host) else mode = DCB_CAP_DCBX_LLD_MANAGED;
- if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE) + if (port_info->qos_cfg.local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE) return mode | DCB_CAP_DCBX_VER_CEE; else return mode | DCB_CAP_DCBX_VER_IEEE; @@ -277,10 +277,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) int ret = ICE_DCB_NO_HW_CHG; struct ice_vsi *pf_vsi;
- curr_cfg = &pf->hw.port_info->local_dcbx_cfg; + curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
/* FW does not care if change happened */ - if (!pf->hw.port_info->is_sw_lldp) + if (!pf->hw.port_info->qos_cfg.is_sw_lldp) ret = ICE_DCB_HW_CHG_RST;
/* Enable DCB tagging only when more than one TC */ @@ -327,7 +327,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) /* Only send new config to HW if we are in SW LLDP mode. Otherwise, * the new config came from the HW in the first place. */ - if (pf->hw.port_info->is_sw_lldp) { + if (pf->hw.port_info->qos_cfg.is_sw_lldp) { ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { dev_err(dev, "Set DCB Config failed\n"); @@ -360,7 +360,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) */ static void ice_cfg_etsrec_defaults(struct ice_port_info *pi) { - struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg; + struct ice_dcbx_cfg *dcbcfg = &pi->qos_cfg.local_dcbx_cfg; u8 i;
/* Ensure ETS recommended DCB configuration is not already set */ @@ -446,7 +446,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
mutex_lock(&pf->tc_mutex);
- if (!pf->hw.port_info->is_sw_lldp) + if (!pf->hw.port_info->qos_cfg.is_sw_lldp) ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info); @@ -455,9 +455,9 @@ void ice_dcb_rebuild(struct ice_pf *pf) goto dcb_error; }
- if (!pf->hw.port_info->is_sw_lldp) { + if (!pf->hw.port_info->qos_cfg.is_sw_lldp) { ret = ice_cfg_lldp_mib_change(&pf->hw, true); - if (ret && !pf->hw.port_info->is_sw_lldp) { + if (ret && !pf->hw.port_info->qos_cfg.is_sw_lldp) { dev_err(dev, "Failed to register for MIB changes\n"); goto dcb_error; } @@ -510,11 +510,12 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) int ret = 0;
pi = pf->hw.port_info; - newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL); + newcfg = kmemdup(&pi->qos_cfg.local_dcbx_cfg, sizeof(*newcfg), + GFP_KERNEL); if (!newcfg) return -ENOMEM;
- memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg)); + memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*newcfg));
dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n"); if (ice_pf_dcb_cfg(pf, newcfg, locked)) @@ -545,7 +546,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) if (!dcbcfg) return -ENOMEM;
- memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg)); + memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*dcbcfg));
dcbcfg->etscfg.willing = ets_willing ? 1 : 0; dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc; @@ -608,7 +609,7 @@ static bool ice_dcb_tc_contig(u8 *prio_table) */ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) { - struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; struct device *dev = ice_pf_to_dev(pf); int ret;
@@ -638,7 +639,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) */ void ice_pf_dcb_recfg(struct ice_pf *pf) { - struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; u8 tc_map = 0; int v, ret;
@@ -691,7 +692,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) port_info = hw->port_info;
err = ice_init_dcb(hw, false); - if (err && !port_info->is_sw_lldp) { + if (err && !port_info->qos_cfg.is_sw_lldp) { dev_err(dev, "Error initializing DCB %d\n", err); goto dcb_init_err; } @@ -858,7 +859,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, /* Update the remote cached instance and return */ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, - &pi->remote_dcbx_cfg); + &pi->qos_cfg.remote_dcbx_cfg); if (ret) { dev_err(dev, "Failed to get remote DCB config\n"); return; @@ -868,10 +869,11 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, mutex_lock(&pf->tc_mutex);
/* store the old configuration */ - tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg; + tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
/* Reset the old DCBX configuration data */ - memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg)); + memset(&pi->qos_cfg.local_dcbx_cfg, 0, + sizeof(pi->qos_cfg.local_dcbx_cfg));
/* Get updated DCBX data from firmware */ ret = ice_get_dcb_cfg(pf->hw.port_info); @@ -881,7 +883,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, }
/* No change detected in DCBX configs */ - if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) { + if (!memcmp(&tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg, + sizeof(tmp_dcbx_cfg))) { dev_dbg(dev, "No change detected in DCBX configuration.\n"); goto out; } @@ -889,13 +892,13 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, pf->dcbx_cap = ice_dcb_get_mode(pi, false);
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg, - &pi->local_dcbx_cfg); - ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg); + &pi->qos_cfg.local_dcbx_cfg); + ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg); if (!need_reconfig) goto out;
/* Enable DCB tagging only when more than one TC */ - if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) { + if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) { dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n"); set_bit(ICE_FLAG_DCB_ENA, pf->flags); } else { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 8c133a8be6ad..4180f1f35fb8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev) while (ice_is_reset_in_progress(pf->state)) usleep_range(1000, 2000);
- set_bit(__ICE_DCBNL_DEVRESET, pf->state); dev_close(netdev); netdev_state_change(netdev); dev_open(netdev, NULL); netdev_state_change(netdev); - clear_bit(__ICE_DCBNL_DEVRESET, pf->state); }
/** @@ -34,12 +32,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev) static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets) { struct ice_dcbx_cfg *dcbxcfg; - struct ice_port_info *pi; struct ice_pf *pf;
pf = ice_netdev_to_pf(netdev); - pi = pf->hw.port_info; - dcbxcfg = &pi->local_dcbx_cfg; + dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
ets->willing = dcbxcfg->etscfg.willing; ets->ets_cap = dcbxcfg->etscfg.maxtcs; @@ -74,7 +70,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -159,6 +155,7 @@ static u8 ice_dcbnl_getdcbx(struct net_device *netdev) static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) { struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_qos_cfg *qos_cfg;
/* if FW LLDP agent is running, DCBNL not allowed to change mode */ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) @@ -175,10 +172,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) return ICE_DCB_NO_HW_CHG;
pf->dcbx_cap = mode; + qos_cfg = &pf->hw.port_info->qos_cfg; if (mode & DCB_CAP_DCBX_VER_CEE) - pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; + qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; else - pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; + qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; @@ -229,7 +227,7 @@ static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc) struct ice_dcbx_cfg *dcbxcfg; int i;
- dcbxcfg = &pi->local_dcbx_cfg; + dcbxcfg = &pi->qos_cfg.local_dcbx_cfg; pfc->pfc_cap = dcbxcfg->pfc.pfccap; pfc->pfc_en = dcbxcfg->pfc.pfcena; pfc->mbc = dcbxcfg->pfc.mbc; @@ -260,7 +258,7 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
mutex_lock(&pf->tc_mutex);
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
if (pfc->pfc_cap) new_cfg->pfc.pfccap = pfc->pfc_cap; @@ -297,9 +295,9 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) if (prio >= ICE_MAX_USER_PRIORITY) return;
- *setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1; + *setting = (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena >> prio) & 0x1; dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n", - prio, *setting, pi->local_dcbx_cfg.pfc.pfcena); + prio, *setting, pi->qos_cfg.local_dcbx_cfg.pfc.pfcena); }
/** @@ -320,7 +318,7 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set) if (prio >= ICE_MAX_USER_PRIORITY) return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc; if (set) @@ -342,7 +340,7 @@ static u8 ice_dcbnl_getpfcstate(struct net_device *netdev) struct ice_port_info *pi = pf->hw.port_info;
/* Return enabled if any UP enabled for PFC */ - if (pi->local_dcbx_cfg.pfc.pfcena) + if (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena) return 1;
return 0; @@ -382,8 +380,8 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
if (state) { set_bit(ICE_FLAG_DCB_ENA, pf->flags); - memcpy(&pf->hw.port_info->desired_dcbx_cfg, - &pf->hw.port_info->local_dcbx_cfg, + memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg, + &pf->hw.port_info->qos_cfg.local_dcbx_cfg, sizeof(struct ice_dcbx_cfg)); } else { clear_bit(ICE_FLAG_DCB_ENA, pf->flags); @@ -417,7 +415,7 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio, if (prio >= ICE_MAX_USER_PRIORITY) return;
- *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio]; + *pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio]; dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio, *pgid); } @@ -448,7 +446,7 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, if (tc >= ICE_MAX_TRAFFIC_CLASS) return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
/* prio_type, bwg_id and bw_pct per UP are not supported */
@@ -478,7 +476,7 @@ ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) if (pgid >= ICE_MAX_TRAFFIC_CLASS) return;
- *bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid]; + *bw_pct = pi->qos_cfg.local_dcbx_cfg.etscfg.tcbwtable[pgid]; dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n", pgid, *bw_pct); } @@ -502,7 +500,7 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct) if (pgid >= ICE_MAX_TRAFFIC_CLASS) return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->etscfg.tcbwtable[pgid] = bw_pct; } @@ -532,7 +530,7 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, if (prio >= ICE_MAX_USER_PRIORITY) return;
- *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio]; + *pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio]; }
/** @@ -703,9 +701,9 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
mutex_lock(&pf->tc_mutex);
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
- old_cfg = &pf->hw.port_info->local_dcbx_cfg; + old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
if (old_cfg->numapps == ICE_DCBX_MAX_APPS) { ret = -EINVAL; @@ -755,7 +753,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) return -EINVAL;
mutex_lock(&pf->tc_mutex); - old_cfg = &pf->hw.port_info->local_dcbx_cfg; + old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
if (old_cfg->numapps <= 1) goto delapp_out; @@ -764,7 +762,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) if (ret) goto delapp_out;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
for (i = 1; i < new_cfg->numapps; i++) { if (app->selector == new_cfg->app[i].selector && @@ -817,7 +815,7 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev) !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return ICE_DCB_NO_HW_CHG;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -888,7 +886,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi) if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags)) return;
- dcbxcfg = &pi->local_dcbx_cfg; + dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
for (i = 0; i < dcbxcfg->numapps; i++) { u8 prio, tc_map; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index aebebd2102da..d70573f5072c 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2986,7 +2986,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) pause->rx_pause = 0; pause->tx_pause = 0;
- dcbx_cfg = &pi->local_dcbx_cfg; + dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) @@ -3038,7 +3038,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pi = vsi->port_info; hw_link_info = &pi->phy.link_info; - dcbx_cfg = &pi->local_dcbx_cfg; + dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* Changing the port's flow control is not supported if this isn't the @@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
/* Get WoL settings based on the HW capability */ - if (ice_is_wol_supported(pf)) { + if (ice_is_wol_supported(&pf->hw)) { wol->supported = WAKE_MAGIC; wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0; } else { @@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back;
- if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf)) + if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw)) return -EOPNOTSUPP;
/* only magic packet is supported */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index ad9c22a1b97a..170367eaa95a 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2078,7 +2078,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) { - struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg; + struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); @@ -2489,7 +2489,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) if (!locked) rtnl_lock();
- err = ice_open(vsi->netdev); + err = ice_open_internal(vsi->netdev);
if (!locked) rtnl_unlock(); @@ -2518,7 +2518,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) if (!locked) rtnl_lock();
- ice_stop(vsi->netdev); + ice_vsi_close(vsi);
if (!locked) rtnl_unlock(); @@ -2944,7 +2944,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) bool ice_is_reset_in_progress(unsigned long *state) { return test_bit(__ICE_RESET_OICR_RECV, state) || - test_bit(__ICE_DCBNL_DEVRESET, state) || test_bit(__ICE_PFR_REQ, state) || test_bit(__ICE_CORER_REQ, state) || test_bit(__ICE_GLOBR_REQ, state); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index e10ca8929f85..00a2520395c5 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3512,15 +3512,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) }
/** - * ice_is_wol_supported - get NVM state of WoL - * @pf: board private structure + * ice_is_wol_supported - check if WoL is supported + * @hw: pointer to hardware info * * Check if WoL is supported based on the HW configuration. * Returns true if NVM supports and enables WoL for this port, false otherwise */ -bool ice_is_wol_supported(struct ice_pf *pf) +bool ice_is_wol_supported(struct ice_hw *hw) { - struct ice_hw *hw = &pf->hw; u16 wol_ctrl;
/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control @@ -3529,7 +3528,7 @@ bool ice_is_wol_supported(struct ice_pf *pf) if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) return false;
- return !(BIT(hw->pf_id) & wol_ctrl); + return !(BIT(hw->port_info->lport) & wol_ctrl); }
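The reworked ice_is_wol_supported() above now takes the ice_hw pointer directly and tests the NVM WoL-control word against the logical port number (hw->port_info->lport) rather than the PF ID; a set bit in that word means WoL is disabled for the port. A minimal userspace sketch of the same bit test, with a made-up control-word value:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A set bit in the WoL control word disables WoL for that port. */
    static bool wol_supported(uint16_t wol_ctrl, unsigned int lport)
    {
            return !(wol_ctrl & (1u << lport));
    }

    int main(void)
    {
            uint16_t wol_ctrl = 0x0002;     /* hypothetical NVM word: WoL off on port 1 */

            for (unsigned int lport = 0; lport < 4; lport++)
                    printf("port %u: WoL %s\n", lport,
                           wol_supported(wol_ctrl, lport) ? "supported" : "disabled");
            return 0;
    }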
/** @@ -4167,28 +4166,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_send_version_unroll; }
+ /* not a fatal error if this fails */ err = ice_init_nvm_phy_type(pf->hw.port_info); - if (err) { + if (err) dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); - goto err_send_version_unroll; - }
+ /* not a fatal error if this fails */ err = ice_update_link_info(pf->hw.port_info); - if (err) { + if (err) dev_err(dev, "ice_update_link_info failed: %d\n", err); - goto err_send_version_unroll; - }
ice_init_link_dflt_override(pf->hw.port_info);
/* if media available, initialize PHY settings */ if (pf->hw.port_info->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + /* not a fatal error if this fails */ err = ice_init_phy_user_cfg(pf->hw.port_info); - if (err) { + if (err) dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); - goto err_send_version_unroll; - }
if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { struct ice_vsi *vsi = ice_get_main_vsi(pf); @@ -4539,6 +4535,7 @@ static int __maybe_unused ice_suspend(struct device *dev) continue; ice_vsi_free_q_vectors(pf->vsi[v]); } + ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); ice_clear_interrupt_scheme(pf);
pci_save_state(pdev); @@ -6613,6 +6610,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) * Returns 0 on success, negative value on failure */ int ice_open(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + + if (ice_is_reset_in_progress(pf->state)) { + netdev_err(netdev, "can't open net device while reset is in progress"); + return -EBUSY; + } + + return ice_open_internal(netdev); +} + +/** + * ice_open_internal - Called when a network interface becomes active + * @netdev: network interface device structure + * + * Internal ice_open implementation. Should not be used directly except for ice_open and reset + * handling routine + * + * Returns 0 on success, negative value on failure + */ +int ice_open_internal(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -6693,6 +6712,12 @@ int ice_stop(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + + if (ice_is_reset_in_progress(pf->state)) { + netdev_err(netdev, "can't stop net device while reset is in progress"); + return -EBUSY; + }
ice_vsi_close(vsi);
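The ice_main.c change above splits ice_open() into a thin netdev entry point that refuses to run while a reset is in progress and ice_open_internal(), which keeps the real bring-up logic for the reset/rebuild paths (and for ice_ena_vsi(), as changed in ice_lib.c above); ice_stop() gains the same guard. A stripped-down sketch of that wrapper pattern, with names that are purely illustrative and not the driver's:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool reset_in_progress;

    static int dev_open_internal(void)
    {
            /* the real bring-up work would go here */
            puts("interface brought up");
            return 0;
    }

    /* Public entry point: refuse user-triggered opens during a reset. */
    static int dev_open(void)
    {
            if (reset_in_progress) {
                    fprintf(stderr, "can't open while reset is in progress\n");
                    return -EBUSY;
            }
            return dev_open_internal();
    }

    int main(void)
    {
            reset_in_progress = true;
            printf("open -> %d\n", dev_open());     /* -EBUSY */
            reset_in_progress = false;
            printf("open -> %d\n", dev_open());     /* 0, via dev_open_internal() */
            return 0;
    }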
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index c33612132ddf..0a8fcd4309ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw, ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, vsi_list_id);
+ if (!m_entry->vsi_list_info) + return ICE_ERR_NO_MEMORY; + /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list */ @@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && fm_entry->fltr_info.vsi_handle == vsi_handle) || (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + fm_entry->vsi_list_info && (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); }
@@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, return ICE_ERR_PARAM;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) { - struct ice_fltr_info *fi; - - fi = &fm_entry->fltr_info; - if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) + if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, - vsi_list_head, fi); + vsi_list_head, + &fm_entry->fltr_info); if (status) return status; } @@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, &remove_list_head); mutex_unlock(rule_lock); if (status) - return; + goto free_fltr_list;
switch (lkup) { case ICE_SW_LKUP_MAC: @@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, break; }
+free_fltr_list: list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { list_del(&fm_entry->list_entry); devm_kfree(ice_hw_to_dev(hw), fm_entry); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index b6fa83c619dd..4cd3142ec20a 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2421,7 +2421,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ if (unlikely(skb->priority == TC_PRIO_CONTROL && vsi->type == ICE_VSI_PF && - vsi->port_info->is_sw_lldp)) + vsi->port_info->qos_cfg.is_sw_lldp)) offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 2226a291a394..1bed183d96a0 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -493,6 +493,7 @@ struct ice_dcb_app_priority_table { #define ICE_TLV_STATUS_ERR 0x4 #define ICE_APP_PROT_ID_FCOE 0x8906 #define ICE_APP_PROT_ID_ISCSI 0x0cbc +#define ICE_APP_PROT_ID_ISCSI_860 0x035c #define ICE_APP_PROT_ID_FIP 0x8914 #define ICE_APP_SEL_ETHTYPE 0x1 #define ICE_APP_SEL_TCPIP 0x2 @@ -514,6 +515,14 @@ struct ice_dcbx_cfg { #define ICE_DCBX_APPS_NON_WILLING 0x1 };
+struct ice_qos_cfg {
+	struct ice_dcbx_cfg local_dcbx_cfg;	/* Oper/Local Cfg */
+	struct ice_dcbx_cfg desired_dcbx_cfg;	/* CEE Desired Cfg */
+	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
+	u8 dcbx_status : 3;			/* see ICE_DCBX_STATUS_DIS */
+	u8 is_sw_lldp : 1;
+};
+
 struct ice_port_info {
 	struct ice_sched_node *root;	/* Root Node per Port */
 	struct ice_hw *hw;		/* back pointer to HW instance */
@@ -537,13 +546,7 @@ struct ice_port_info {
 		sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
 	/* List contain profile ID(s) and other params per layer */
 	struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
-	struct ice_dcbx_cfg local_dcbx_cfg;	/* Oper/Local Cfg */
-	/* DCBX info */
-	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
-	struct ice_dcbx_cfg desired_dcbx_cfg;	/* CEE Desired Cfg */
-	/* LLDP/DCBX Status */
-	u8 dcbx_status:3;			/* see ICE_DCBX_STATUS_DIS */
-	u8 is_sw_lldp:1;
+	struct ice_qos_cfg qos_cfg;
 	u8 is_vf:1;
 };
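The ice_type.h hunk above is the heart of the ice series: the three DCBX configuration copies and the LLDP/DCBX status bits move out of struct ice_port_info into the new struct ice_qos_cfg, and every caller switches from pi->local_dcbx_cfg, pi->is_sw_lldp and friends to the pi->qos_cfg.* equivalents, as seen throughout ice_dcb.c, ice_dcb_lib.c, ice_dcb_nl.c, ice_ethtool.c, ice_lib.c and ice_txrx.c earlier in this patch. A compile-and-run sketch of the before/after access pattern, using placeholder types rather than the real ice structures:

    #include <stdio.h>

    struct dcbx_cfg { int dcbx_mode; };     /* placeholder for struct ice_dcbx_cfg */

    struct qos_cfg {                        /* mirrors the new struct ice_qos_cfg */
            struct dcbx_cfg local_dcbx_cfg;
            struct dcbx_cfg desired_dcbx_cfg;
            struct dcbx_cfg remote_dcbx_cfg;
            unsigned char dcbx_status : 3;
            unsigned char is_sw_lldp : 1;
    };

    struct port_info {                      /* QoS state now lives in one sub-struct */
            struct qos_cfg qos_cfg;
            unsigned char is_vf : 1;
    };

    int main(void)
    {
            struct port_info pi = { 0 };

            /* old code:  pi.local_dcbx_cfg.dcbx_mode = ...;  pi.is_sw_lldp = 1; */
            pi.qos_cfg.local_dcbx_cfg.dcbx_mode = 1;
            pi.qos_cfg.is_sw_lldp = 1;
            printf("mode=%d sw_lldp=%u\n",
                   pi.qos_cfg.local_dcbx_cfg.dcbx_mode,
                   (unsigned int)pi.qos_cfg.is_sw_lldp);
            return 0;
    }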
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index b051417ede67..9153c9bda96f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev) }
enum { - MLX5_INTERFACE_PROTOCOL_ETH_REP, MLX5_INTERFACE_PROTOCOL_ETH, + MLX5_INTERFACE_PROTOCOL_ETH_REP,
+ MLX5_INTERFACE_PROTOCOL_IB, MLX5_INTERFACE_PROTOCOL_IB_REP, MLX5_INTERFACE_PROTOCOL_MPIB, - MLX5_INTERFACE_PROTOCOL_IB,
MLX5_INTERFACE_PROTOCOL_VNET, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f258f2f9b8cf..9061a30a93bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -510,6 +510,7 @@ struct mlx5e_icosq { struct mlx5_wq_cyc wq; void __iomem *uar_map; u32 sqn; + u16 reserved_room; unsigned long state;
/* control path */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index b42396df3111..0469f53dfb99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -184,6 +184,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry) return !!(entry->tuple_nat_node.next); }
+static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+		       u32 *labels, u32 *id)
+{
+	if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+		*id = 0;
+		return 0;
+	}
+
+	if (mapping_add(ct_priv->labels_mapping, labels, id))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+	if (id)
+		mapping_remove(ct_priv->labels_mapping, id);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -435,7 +457,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 	mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
 	mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl,
 			     zone_rule->mh);
-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 	kfree(attr);
 }
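The two helpers added above wrap mapping_add()/mapping_remove() so that an all-zero ct_labels key never enters the mapping table: it simply becomes id 0, and id 0 is never freed. A standalone illustration of that convention follows; the mapping table here is a trivial stub, not the mlx5 mapping API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stub "mapping table": hands out ids 1..N for non-zero keys. */
    static uint32_t next_id = 1;

    static int stub_mapping_add(const uint32_t *labels, uint32_t *id)
    {
            (void)labels;
            *id = next_id++;
            return 0;
    }

    static void stub_mapping_remove(uint32_t id)
    {
            printf("released id %u\n", id);
    }

    static int get_label_mapping(const uint32_t labels[4], uint32_t *id)
    {
            static const uint32_t zero[4];

            if (!memcmp(labels, zero, sizeof(zero))) {      /* all-zero labels: reserved id 0 */
                    *id = 0;
                    return 0;
            }
            return stub_mapping_add(labels, id);
    }

    static void put_label_mapping(uint32_t id)
    {
            if (id)                                         /* id 0 was never allocated */
                    stub_mapping_remove(id);
    }

    int main(void)
    {
            uint32_t id, none[4] = { 0 }, some[4] = { 0, 0, 0, 5 };

            get_label_mapping(none, &id);
            printf("zero labels -> id %u\n", id);
            get_label_mapping(some, &id);
            printf("labels -> id %u\n", id);
            put_label_mapping(0);                           /* no-op */
            put_label_mapping(id);
            return 0;
    }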
@@ -638,8 +660,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, if (!meta) return -EOPNOTSUPP;
- err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels, - &attr->ct_attr.ct_labels_id); + err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels, + &attr->ct_attr.ct_labels_id); if (err) return -EOPNOTSUPP; if (nat) { @@ -675,7 +697,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
err_mapping: dealloc_mod_hdr_actions(&mod_acts); - mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id); + mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); return err; }
@@ -743,7 +765,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, err_rule: mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, zone_rule->mh); - mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id); + mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); err_mod_hdr: kfree(attr); err_attr: @@ -1198,7 +1220,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_ if (!priv || !ct_attr->ct_labels_id) return;
- mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id); + mlx5_put_label_mapping(priv, ct_attr->ct_labels_id); }
int @@ -1276,7 +1298,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1]; ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2]; ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3]; - if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id)) + if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id)) return -EOPNOTSUPP; mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id, MLX5_CT_LABELS_MASK); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 4880f2179273..05d673e5289d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -434,4 +434,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size) return wqe_size * 2 - 1; }
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size) +{ + u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size); + + return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room); +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index d06532d0baa4..c0bd4e55ed8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -137,11 +137,10 @@ post_static_params(struct mlx5e_icosq *sq, { struct mlx5e_set_tls_static_params_wqe *wqe; struct mlx5e_icosq_wqe_info wi; - u16 pi, num_wqebbs, room; + u16 pi, num_wqebbs;
num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS; - room = mlx5e_stop_room_for_wqe(num_wqebbs); - if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room))) + if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs))) return ERR_PTR(-ENOSPC);
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); @@ -168,11 +167,10 @@ post_progress_params(struct mlx5e_icosq *sq, { struct mlx5e_set_tls_progress_params_wqe *wqe; struct mlx5e_icosq_wqe_info wi; - u16 pi, num_wqebbs, room; + u16 pi, num_wqebbs;
num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS; - room = mlx5e_stop_room_for_wqe(num_wqebbs); - if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room))) + if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs))) return ERR_PTR(-ENOSPC);
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); @@ -277,17 +275,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
buf->priv_rx = priv_rx;
- BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1); - spin_lock_bh(&sq->channel->async_icosq_lock);
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) { + if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) { spin_unlock_bh(&sq->channel->async_icosq_lock); err = -ENOSPC; goto err_dma_unmap; }
- pi = mlx5e_icosq_get_next_pi(sq, 1); + pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS); wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)) @@ -307,7 +303,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
wi = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS, - .num_wqebbs = 1, + .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS, .tls_get_params.buf = buf, }; icosq_fill_wi(sq, pi, &wi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c9d01e705ab2..d3d532fdf04e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -747,11 +747,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev, return 0; }
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings, - u32 eth_proto_cap, - u8 connector_type, bool ext) +static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev, + struct ethtool_link_ksettings *link_ksettings, + u32 eth_proto_cap, u8 connector_type) { - if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) { + if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) @@ -887,9 +887,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = { [MLX5E_PORT_OTHER] = PORT_OTHER, };
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext) +static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type) { - if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) return ptys2connector_type[connector_type];
if (eth_proto & @@ -990,11 +990,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; - - link_ksettings->base.port = get_connector_port(eth_proto_oper, - connector_type, ext); - ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin, - connector_type, ext); + connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ? + connector_type : MLX5E_PORT_UNKNOWN; + link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type); + ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin, + connector_type); get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b6324d11a008..7bb189e65628 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1058,6 +1058,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -2299,6 +2300,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv, mlx5e_build_ico_cq_param(priv, log_wq_size, ¶m->cqp); }
+static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, + u8 log_wq_size, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + mlx5e_build_sq_param_common(priv, param); + + /* async_icosq is used by XSK only if xdp_prog is active */ + if (params->xdp_prog) + param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */ + MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); + MLX5_SET(wq, wq, log_wq_sz, log_wq_size); + mlx5e_build_ico_cq_param(priv, log_wq_size, ¶m->cqp); +} + void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_sq_param *param) @@ -2347,7 +2366,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, mlx5e_build_sq_param(priv, params, &cparam->txq_sq); mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq); - mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq); + mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq); }
int mlx5e_open_channels(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index f0ceae65f6cf..8afbb485197e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1103,8 +1103,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_rep_tc_enable(priv);
- mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, - 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); + if (MLX5_CAP_GEN(mdev, uplink_follow)) + mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, + 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; mlx5_notifier_register(mdev, &priv->events_nb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index fc0afa03d407..b5f48efebd71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -928,13 +928,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) mutex_unlock(&table->lock); }
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +#define MLX5_MAX_ASYNC_EQS 4 +#else +#define MLX5_MAX_ASYNC_EQS 3 +#endif + int mlx5_eq_table_create(struct mlx5_core_dev *dev) { struct mlx5_eq_table *eq_table = dev->priv.eq_table; + int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? + MLX5_CAP_GEN(dev, max_num_eqs) : + 1 << MLX5_CAP_GEN(dev, log_max_eq); int err;
eq_table->num_comp_eqs = - mlx5_irq_get_num_comp(eq_table->irq_table); + min_t(int, + mlx5_irq_get_num_comp(eq_table->irq_table), + num_eqs - MLX5_MAX_ASYNC_EQS);
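The eq.c hunk above caps the number of completion EQs so that, out of the device's EQ budget (max_num_eqs when reported, otherwise 2^log_max_eq), enough EQs remain for the async EQs (4 with on-demand paging, 3 without). The arithmetic is a plain min(), as in this small sketch with made-up numbers:

    #include <stdio.h>

    #define MAX_ASYNC_EQS 4         /* 3 when on-demand paging is compiled out */

    static int num_comp_eqs(int irq_comp_vectors, int total_eqs)
    {
            int avail = total_eqs - MAX_ASYNC_EQS;

            return irq_comp_vectors < avail ? irq_comp_vectors : avail;
    }

    int main(void)
    {
            /* hypothetical device: 64 EQs total, 62 completion vectors requested */
            printf("comp EQs = %d\n", num_comp_eqs(62, 64));        /* clamped to 60 */
            return 0;
    }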
err = create_async_eqs(dev); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a6956cfc9cb1..4399c9a4999d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -21,6 +21,7 @@ #include <net/red.h> #include <net/vxlan.h> #include <net/flow_offload.h> +#include <net/inet_ecn.h>
#include "port.h" #include "core.h" @@ -346,6 +347,20 @@ struct mlxsw_sp_port_type_speed_ops { u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap); };
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+					   bool *trap_en)
+{
+	bool set_ce = false;
+
+	*trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+	if (set_ce)
+		return INET_ECN_CE;
+	else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+		return INET_ECN_ECT_1;
+	else
+		return inner_ecn;
+}
+
 static inline struct net_device *
 mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
 {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 6ccca39bae84..64a8f838eb53 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -335,12 +335,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
 					    u8 inner_ecn, u8 outer_ecn)
 {
 	char tidem_pl[MLXSW_REG_TIDEM_LEN];
-	bool trap_en, set_ce = false;
 	u8 new_inner_ecn;
+	bool trap_en;
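The mlxsw_sp_tunnel_ecn_decap() helper introduced just above centralizes the decap decision that the spectrum_ipip and spectrum_nve conversions below now share: trap the packet when __INET_ECN_decapsulate() reports a combination the hardware should not forward, set CE on the inner header when asked, and keep ECT(1) from the outer header over an ECT(0) inner header. A userspace sketch of that decision table, with a deliberately simplified restatement of the decapsulate check (not the kernel's exact implementation):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3 };     /* IP header ECN field */

    /* Simplified stand-in for __INET_ECN_decapsulate(): non-zero means trap/drop
     * instead of forwarding in hardware; *set_ce asks for CE on the inner header. */
    static int ecn_decapsulate(uint8_t outer, uint8_t inner, bool *set_ce)
    {
            if (inner == NOT_ECT && outer != NOT_ECT)
                    return 1;
            *set_ce = (outer == CE);
            return 0;
    }

    /* Mirrors the shape of the new mlxsw_sp_tunnel_ecn_decap() decision. */
    static uint8_t tunnel_ecn_decap(uint8_t outer, uint8_t inner, bool *trap)
    {
            bool set_ce = false;

            *trap = !!ecn_decapsulate(outer, inner, &set_ce);
            if (set_ce)
                    return CE;
            if (outer == ECT_1 && inner == ECT_0)
                    return ECT_1;
            return inner;
    }

    int main(void)
    {
            bool trap;
            uint8_t out = tunnel_ecn_decap(CE, ECT_0, &trap);

            printf("outer=CE inner=ECT(0) -> inner'=%u trap=%d\n",
                   (unsigned int)out, trap);
            return 0;
    }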
- trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce); - new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn; - + new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn, + &trap_en); mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn, trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index e5ec595593f4..9eba8fa684ae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp, u8 inner_ecn, u8 outer_ecn) { char tndem_pl[MLXSW_REG_TNDEM_LEN]; - bool trap_en, set_ce = false; u8 new_inner_ecn; + bool trap_en;
- trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce); - new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn; - + new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn, + &trap_en); mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn, trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 1634ca6d4a8f..c84c8bf2bc20 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, dev_kfree_skb_any(curr); if (segs != NULL) { curr = segs; - segs = segs->next; + segs = next; curr->next = NULL; dev_kfree_skb_any(segs); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 0e2db6ea79e9..2ec62c8d86e1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) dev_consume_skb_any(skb); else dev_kfree_skb_any(skb); + return; }
nfp_ccm_rx(&bpf->ccm, skb); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index caf12eec9945..56833a41f3d2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -190,6 +190,7 @@ struct nfp_fl_internal_ports { * @qos_rate_limiters: Current active qos rate limiters * @qos_stats_lock: Lock on qos stats updates * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded + * @merge_table: Hash table to store merged flows */ struct nfp_flower_priv { struct nfp_app *app; @@ -223,6 +224,7 @@ struct nfp_flower_priv { unsigned int qos_rate_limiters; spinlock_t qos_stats_lock; /* Protect the qos stats */ int pre_tun_rule_cnt; + struct rhashtable merge_table; };
/** @@ -350,6 +352,12 @@ struct nfp_fl_payload_link { };
extern const struct rhashtable_params nfp_flower_table_params; +extern const struct rhashtable_params merge_table_params; + +struct nfp_merge_info { + u64 parent_ctx; + struct rhash_head ht_node; +};
struct nfp_fl_stats_frame { __be32 stats_con_id; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index aa06fcb38f8b..327bb56b3ef5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = { .automatic_shrinking = true, };
+const struct rhashtable_params merge_table_params = { + .key_offset = offsetof(struct nfp_merge_info, parent_ctx), + .head_offset = offsetof(struct nfp_merge_info, ht_node), + .key_len = sizeof(u64), +}; + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_num_mems) { @@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_flow_table;
+ err = rhashtable_init(&priv->merge_table, &merge_table_params); + if (err) + goto err_free_stats_ctx_table; + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */ @@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - goto err_free_stats_ctx_table; + goto err_free_merge_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -550,6 +560,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_merge_table: + rhashtable_destroy(&priv->merge_table); err_free_stats_ctx_table: rhashtable_destroy(&priv->stats_ctx_table); err_free_flow_table: @@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) nfp_check_rhashtable_empty, NULL); rhashtable_free_and_destroy(&priv->stats_ctx_table, nfp_check_rhashtable_empty, NULL); + rhashtable_free_and_destroy(&priv->merge_table, + nfp_check_rhashtable_empty, NULL); kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index d72225d64a75..e95969c462e4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, struct netlink_ext_ack *extack = NULL; struct nfp_fl_payload *merge_flow; struct nfp_fl_key_ls merge_key_ls; + struct nfp_merge_info *merge_info; + u64 parent_ctx = 0; int err;
ASSERT_RTNL(); @@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, nfp_flower_is_merge_flow(sub_flow2)) return -EINVAL;
+ /* check if the two flows are already merged */ + parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32; + parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id)); + if (rhashtable_lookup_fast(&priv->merge_table, + &parent_ctx, merge_table_params)) { + nfp_flower_cmsg_warn(app, "The two flows are already merged.\n"); + return 0; + } + err = nfp_flower_can_merge(sub_flow1, sub_flow2); if (err) return err; @@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, if (err) goto err_release_metadata;
+ merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL); + if (!merge_info) { + err = -ENOMEM; + goto err_remove_rhash; + } + merge_info->parent_ctx = parent_ctx; + err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node, + merge_table_params); + if (err) + goto err_destroy_merge_info; + err = nfp_flower_xmit_flow(app, merge_flow, NFP_FLOWER_CMSG_TYPE_FLOW_MOD); if (err) - goto err_remove_rhash; + goto err_remove_merge_info;
merge_flow->in_hw = true; sub_flow1->in_hw = false;
return 0;
+err_remove_merge_info: + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, + &merge_info->ht_node, + merge_table_params)); +err_destroy_merge_info: + kfree(merge_info); err_remove_rhash: WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, &merge_flow->fl_node, @@ -1359,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app, { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload_link *link, *temp; + struct nfp_merge_info *merge_info; struct nfp_fl_payload *origin; + u64 parent_ctx = 0; bool mod = false; int err;
@@ -1396,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app, err_free_links: /* Clean any links connected with the merged flow. */ list_for_each_entry_safe(link, temp, &merge_flow->linked_flows, - merge_flow.list) + merge_flow.list) { + u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id); + + parent_ctx = (parent_ctx << 32) | (u64)(ctx_id); nfp_flower_unlink_flow(link); + } + + merge_info = rhashtable_lookup_fast(&priv->merge_table, + &parent_ctx, + merge_table_params); + if (merge_info) { + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, + &merge_info->ht_node, + merge_table_params)); + kfree(merge_info); + }
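The nfp flower changes above key the new merge_table rhashtable on a 64-bit parent context built from the two sub-flows' host context ids: the insert path packs sub_flow1's id into the upper 32 bits and sub_flow2's into the lower 32, and the removal path rebuilds the same key while walking the linked flows so the entry can be looked up and freed. The packing itself is a one-liner:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Combine two 32-bit flow context ids into the 64-bit hash key. */
    static uint64_t parent_ctx_key(uint32_t ctx_id1, uint32_t ctx_id2)
    {
            return ((uint64_t)ctx_id1 << 32) | (uint64_t)ctx_id2;
    }

    int main(void)
    {
            /* hypothetical host context ids of the two sub-flows */
            uint64_t key = parent_ctx_key(0x12, 0x34);

            printf("parent_ctx = 0x%016" PRIx64 "\n", key); /* 0x0000001200000034 */
            return 0;
    }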
kfree(merge_flow->action_data); kfree(merge_flow->mask_data); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 5523f069b9a5..f35b0b83fe85 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -908,8 +908,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
info = skb_tunnel_info(skb); if (info) { - info->key.u.ipv4.dst = fl4.saddr; - info->key.u.ipv4.src = fl4.daddr; + struct ip_tunnel_info *unclone; + + unclone = skb_tunnel_info_unclone(skb); + if (unlikely(!unclone)) { + dst_release(&rt->dst); + return -ENOMEM; + } + + unclone->key.u.ipv4.dst = fl4.saddr; + unclone->key.u.ipv4.src = fl4.daddr; }
if (!pskb_may_pull(skb, ETH_HLEN)) { @@ -993,8 +1001,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct ip_tunnel_info *info = skb_tunnel_info(skb);
if (info) { - info->key.u.ipv6.dst = fl6.saddr; - info->key.u.ipv6.src = fl6.daddr; + struct ip_tunnel_info *unclone; + + unclone = skb_tunnel_info_unclone(skb); + if (unlikely(!unclone)) { + dst_release(dst); + return -ENOMEM; + } + + unclone->key.u.ipv6.dst = fl6.saddr; + unclone->key.u.ipv6.src = fl6.daddr; }
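Both geneve hunks above (and the vxlan hunks further down) stop writing the collected addresses back through the pointer returned by skb_tunnel_info(), which can reference metadata shared with other skbs; they first take a private copy with skb_tunnel_info_unclone() and fail the transmit with -ENOMEM if that copy cannot be made. A rough copy-before-write sketch with stand-in types (this is not the kernel helper, just the shape of it):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct tun_info {                       /* stand-in for the shared tunnel metadata */
            int refcnt;
            unsigned int saddr, daddr;
    };

    /* Hand back a private copy if the metadata is shared; only then is it
     * safe for the transmit path to scribble on the addresses. */
    static struct tun_info *tun_info_unclone(struct tun_info **slot)
    {
            struct tun_info *info = *slot, *copy;

            if (info->refcnt == 1)
                    return info;            /* already exclusive */

            copy = malloc(sizeof(*copy));
            if (!copy)
                    return NULL;            /* caller turns this into -ENOMEM */
            memcpy(copy, info, sizeof(*copy));
            copy->refcnt = 1;
            info->refcnt--;
            *slot = copy;
            return copy;
    }

    int main(void)
    {
            struct tun_info shared = { .refcnt = 2, .saddr = 1, .daddr = 2 };
            struct tun_info *slot = &shared;
            struct tun_info *priv = tun_info_unclone(&slot);

            if (!priv)
                    return ENOMEM;
            priv->saddr = 42;               /* private copy changed, original untouched */
            printf("shared.saddr=%u private.saddr=%u\n", shared.saddr, priv->saddr);
            if (priv != &shared)
                    free(priv);
            return 0;
    }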
if (!pskb_may_pull(skb, ETH_HLEN)) { diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 0dd0ba915ab9..23ee0b14cbfa 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n) return -ENOMEM; } usb_anchor_urb(urb, &atusb->idle_urbs); + usb_free_urb(urb); n--; } return 0; diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 53282a6d5928..287cccf8f7f4 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
int bcm_phy_set_eee(struct phy_device *phydev, bool enable) { - int val; + int val, mask = 0;
/* Enable EEE at PHY level */ val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL); @@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable) if (val < 0) return val;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported)) + mask |= MDIO_EEE_1000T; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->supported)) + mask |= MDIO_EEE_100TX; + if (enable) - val |= (MDIO_EEE_100TX | MDIO_EEE_1000T); + val |= mask; else - val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T); + val &= ~mask;
phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
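Rather than always toggling both EEE advertisement bits, bcm_phy_set_eee() above now builds the mask from the link modes the PHY actually supports, so a 100BASE-TX-only PHY no longer advertises 1000T EEE. The mask construction in isolation, with register bit values that follow the usual MDIO EEE advertisement layout (treat them as illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EEE_100TX       0x0002  /* 100BASE-TX EEE advertisement bit */
    #define EEE_1000T       0x0004  /* 1000BASE-T EEE advertisement bit */

    static uint16_t eee_adv(uint16_t reg, bool enable,
                            bool supports_100full, bool supports_1000full)
    {
            uint16_t mask = 0;

            if (supports_1000full)
                    mask |= EEE_1000T;
            if (supports_100full)
                    mask |= EEE_100TX;

            return enable ? (uint16_t)(reg | mask) : (uint16_t)(reg & ~mask);
    }

    int main(void)
    {
            /* 100BASE-TX-only PHY: only the 100TX bit is touched */
            printf("0x%04x\n", eee_adv(0x0000, true, true, false)); /* 0x0002 */
            return 0;
    }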
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5512418b7be0..2ed54818dcbc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -69,6 +69,14 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/mutex.h> +#include <linux/ieee802154.h> +#include <linux/if_ltalk.h> +#include <uapi/linux/if_fddi.h> +#include <uapi/linux/if_hippi.h> +#include <uapi/linux/if_fc.h> +#include <net/ax25.h> +#include <net/rose.h> +#include <net/6lowpan.h>
#include <linux/uaccess.h> #include <linux/proc_fs.h> @@ -2925,6 +2933,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, return __tun_set_ebpf(tun, prog_p, prog); }
+/* Return correct value for tun->dev->addr_len based on tun->dev->type. */ +static unsigned char tun_get_addr_len(unsigned short type) +{ + switch (type) { + case ARPHRD_IP6GRE: + case ARPHRD_TUNNEL6: + return sizeof(struct in6_addr); + case ARPHRD_IPGRE: + case ARPHRD_TUNNEL: + case ARPHRD_SIT: + return 4; + case ARPHRD_ETHER: + return ETH_ALEN; + case ARPHRD_IEEE802154: + case ARPHRD_IEEE802154_MONITOR: + return IEEE802154_EXTENDED_ADDR_LEN; + case ARPHRD_PHONET_PIPE: + case ARPHRD_PPP: + case ARPHRD_NONE: + return 0; + case ARPHRD_6LOWPAN: + return EUI64_ADDR_LEN; + case ARPHRD_FDDI: + return FDDI_K_ALEN; + case ARPHRD_HIPPI: + return HIPPI_ALEN; + case ARPHRD_IEEE802: + return FC_ALEN; + case ARPHRD_ROSE: + return ROSE_ADDR_LEN; + case ARPHRD_NETROM: + return AX25_ADDR_LEN; + case ARPHRD_LOCALTLK: + return LTALK_ALEN; + default: + return 0; + } +} + static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { @@ -3088,6 +3135,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, break; } tun->dev->type = (int) arg; + tun->dev->addr_len = tun_get_addr_len(tun->dev->type); netif_info(tun, drv, tun->dev, "linktype set to %d\n", tun->dev->type); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 2bb28db89432..d18642a8144c 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index) return serial; }
-static int get_free_serial_index(void) +static int obtain_minor(struct hso_serial *serial) { int index; unsigned long flags; @@ -619,8 +619,10 @@ static int get_free_serial_index(void) spin_lock_irqsave(&serial_table_lock, flags); for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { if (serial_table[index] == NULL) { + serial_table[index] = serial->parent; + serial->minor = index; spin_unlock_irqrestore(&serial_table_lock, flags); - return index; + return 0; } } spin_unlock_irqrestore(&serial_table_lock, flags); @@ -629,15 +631,12 @@ static int get_free_serial_index(void) return -1; }
-static void set_serial_by_index(unsigned index, struct hso_serial *serial) +static void release_minor(struct hso_serial *serial) { unsigned long flags;
spin_lock_irqsave(&serial_table_lock, flags); - if (serial) - serial_table[index] = serial->parent; - else - serial_table[index] = NULL; + serial_table[serial->minor] = NULL; spin_unlock_irqrestore(&serial_table_lock, flags); }
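The hso rework above replaces the two-step get_free_serial_index()/set_serial_by_index() pair with obtain_minor()/release_minor(), which claim and clear the serial_table slot while serial_table_lock is still held, so two probing devices can no longer race for the same free index. A condensed sketch of the claim-under-lock pattern, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    #define MINORS 4

    static void *table[MINORS];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Find a free slot and claim it in the same critical section. */
    static int obtain_minor(void *owner, int *minor)
    {
            pthread_mutex_lock(&table_lock);
            for (int i = 0; i < MINORS; i++) {
                    if (!table[i]) {
                            table[i] = owner;
                            *minor = i;
                            pthread_mutex_unlock(&table_lock);
                            return 0;
                    }
            }
            pthread_mutex_unlock(&table_lock);
            return -1;
    }

    static void release_minor(int minor)
    {
            pthread_mutex_lock(&table_lock);
            table[minor] = NULL;
            pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
            static int dev_a, dev_b;
            int a, b;

            obtain_minor(&dev_a, &a);
            obtain_minor(&dev_b, &b);
            printf("claimed minors %d and %d\n", a, b);     /* 0 and 1 */
            release_minor(a);
            release_minor(b);
            return 0;
    }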
@@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) static void hso_serial_tty_unregister(struct hso_serial *serial) { tty_unregister_device(tty_drv, serial->minor); + release_minor(serial); }
static void hso_serial_common_free(struct hso_serial *serial) @@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial) static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { - int minor; int i;
tty_port_init(&serial->port);
- minor = get_free_serial_index(); - if (minor < 0) + if (obtain_minor(serial)) goto exit2;
/* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, - tty_drv, minor, &serial->parent->interface->dev, + tty_drv, serial->minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); - if (IS_ERR(serial->parent->dev)) + if (IS_ERR(serial->parent->dev)) { + release_minor(serial); goto exit2; + }
- /* fill in specific data for later use */ - serial->minor = minor; serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; @@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device(
serial->write_data = hso_std_serial_write_data;
- /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev);
@@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock);
- /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev);
@@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface) cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); kref_put(&serial_table[i]->ref, hso_serial_ref_free); - set_serial_by_index(i, NULL); } }
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0842371eca3d..4adfa6a01198 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; } else if (err) { if (info) { + struct ip_tunnel_info *unclone; struct in_addr src, dst;
+ unclone = skb_tunnel_info_unclone(skb); + if (unlikely(!unclone)) + goto tx_error; + src = remote_ip.sin.sin_addr; dst = local_ip.sin.sin_addr; - info->key.u.ipv4.src = src.s_addr; - info->key.u.ipv4.dst = dst.s_addr; + unclone->key.u.ipv4.src = src.s_addr; + unclone->key.u.ipv4.dst = dst.s_addr; } vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); dst_release(ndst); @@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; } else if (err) { if (info) { + struct ip_tunnel_info *unclone; struct in6_addr src, dst;
+ unclone = skb_tunnel_info_unclone(skb); + if (unlikely(!unclone)) + goto tx_error; + src = remote_ip.sin6.sin6_addr; dst = local_ip.sin6.sin6_addr; - info->key.u.ipv6.src = src; - info->key.u.ipv6.dst = dst; + unclone->key.u.ipv6.src = src; + unclone->key.u.ipv6.dst = dst; }
vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 0720f5f92caa..4d9dc7d15908 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
if (pad > 0) { /* Pad the frame with zeros */ if (__skb_pad(skb, pad, false)) - goto drop; + goto out; skb_put(skb, pad); } } @@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK;
drop: - dev->stats.tx_dropped++; kfree_skb(skb); +out: + dev->stats.tx_dropped++; return NETDEV_TX_OK; }
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 720193d16539..7da193a12871 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -232,7 +232,7 @@ enum iwl_reg_capa_flags_v2 { REG_CAPA_V2_MCS_9_ALLOWED = BIT(6), REG_CAPA_V2_WEATHER_DISABLED = BIT(7), REG_CAPA_V2_40MHZ_ALLOWED = BIT(8), - REG_CAPA_V2_11AX_DISABLED = BIT(13), + REG_CAPA_V2_11AX_DISABLED = BIT(10), };
/* diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 8fba190e84cf..cecc32e7dbe8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" @@ -75,15 +75,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | - u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, - CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | - u32_encode_bits(250, - CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | - CSR_LTR_LONG_VAL_AD_SNOOP_REQ | - u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, - CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | - u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); struct iwl_context_info_gen3 *ctxt_info_gen3; struct iwl_prph_scratch *prph_scratch; struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; @@ -217,26 +208,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
- /* - * To workaround hardware latency issues during the boot process, - * initialize the LTR to ~250 usec (see ltr_val above). - * The firmware initializes this again later (to a smaller value). - */ - if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || - trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && - !trans->trans_cfg->integrated) { - iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); - } else if (trans->trans_cfg->integrated && - trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { - iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); - iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); - } - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); - else - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); - return 0;
err_free_ctxt_info: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index d1bb273d6b6d..74ce31fdf45e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" @@ -240,7 +240,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr); - iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
/* Context info will be released upon alive or failure to get one */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index c602b815dcc2..08788bc90683 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -260,6 +260,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) mutex_unlock(&trans_pcie->mutex); }
+static void iwl_pcie_set_ltr(struct iwl_trans *trans) +{ + u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | + u32_encode_bits(250, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | + CSR_LTR_LONG_VAL_AD_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | + u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); + + /* + * To workaround hardware latency issues during the boot process, + * initialize the LTR to ~250 usec (see ltr_val above). + * The firmware initializes this again later (to a smaller value). + */ + if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && + !trans->trans_cfg->integrated) { + iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); + } else if (trans->trans_cfg->integrated && + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { + iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); + iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); + } +} + int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { @@ -326,6 +354,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, if (ret) goto out;
+ iwl_pcie_set_ltr(trans); + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); + else + iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); + /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) diff --git a/drivers/of/property.c b/drivers/of/property.c index 5f9eed79a8aa..6d8368bf88ca 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1260,7 +1260,16 @@ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL) DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL) DEFINE_SUFFIX_PROP(regulators, "-supply", NULL) DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells") -DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells") + +static struct device_node *parse_gpios(struct device_node *np, + const char *prop_name, int index) +{ + if (!strcmp_suffix(prop_name, ",nr-gpios")) + return NULL; + + return parse_suffix_prop_cells(np, prop_name, index, "-gpios", + "#gpio-cells"); +}
static struct device_node *parse_iommu_maps(struct device_node *np, const char *prop_name, int index) diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 57cc92891a57..078648a9201b 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -483,11 +483,16 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) goto wakeup;
/* - * Switch events will wake the device and report the new switch - * position to the input subsystem. + * Some devices send (duplicate) tablet-mode events when moved + * around even though the mode has not changed; and they do this + * even when suspended. + * Update the switch state in case it changed and then return + * without waking up to avoid spurious wakeups. */ - if (priv->switches && (event == 0xcc || event == 0xcd)) - goto wakeup; + if (event == 0xcc || event == 0xcd) { + report_tablet_mode_event(priv->switches, event); + return; + }
/* Wake up on 5-button array events only. */ if (event == 0xc0 || !priv->array) @@ -501,9 +506,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) wakeup: pm_wakeup_hard_event(&device->dev);
- if (report_tablet_mode_event(priv->switches, event)) - return; - return; }
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index ddecf25b5dd4..d7894f178bd4 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca) return ret; }
+/** + * cec_add_elem - Add an element to the CEC array. + * @pfn: page frame number to insert + * + * Return values: + * - <0: on error + * - 0: on success + * - >0: when the inserted pfn was offlined + */ static int cec_add_elem(u64 pfn) { struct ce_array *ca = &ce_arr; + int count, err, ret = 0; unsigned int to = 0; - int count, ret = 0;
/* * We can be called very early on the identify_cpu() path where we are @@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn) if (ca->n == MAX_ELEMS) WARN_ON(!del_lru_elem_unlocked(ca));
- ret = find_elem(ca, pfn, &to); - if (ret < 0) { + err = find_elem(ca, pfn, &to); + if (err < 0) { /* * Shift range [to-end] to make room for one more element. */ diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c index e690c2ce5b3c..25e33028871c 100644 --- a/drivers/regulator/bd9571mwv-regulator.c +++ b/drivers/regulator/bd9571mwv-regulator.c @@ -124,7 +124,7 @@ static const struct regulator_ops vid_ops = {
static const struct regulator_desc regulators[] = { BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f, - 0x80, 600000, 10000, 0x3c), + 0x6f, 600000, 10000, 0x3c), BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf, 16, 1625000, 25000, 0), BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf, @@ -133,7 +133,7 @@ static const struct regulator_desc regulators[] = { 11, 2800000, 100000, 0), BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops, BD9571MWV_DVFS_MONIVDAC, 0x7f, - 0x80, 600000, 10000, 0x3c), + 0x6f, 600000, 10000, 0x3c), };
#ifdef CONFIG_PM_SLEEP diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c index 2667919d76b3..16979c1cd2f4 100644 --- a/drivers/remoteproc/pru_rproc.c +++ b/drivers/remoteproc/pru_rproc.c @@ -585,7 +585,7 @@ pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw) break; }
- if (pru->data->is_k3 && is_iram) { + if (pru->data->is_k3) { ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset, filesz); if (ret) { diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c index 5521c4437ffa..7c007dd7b200 100644 --- a/drivers/remoteproc/qcom_pil_info.c +++ b/drivers/remoteproc/qcom_pil_info.c @@ -56,7 +56,7 @@ static int qcom_pil_info_init(void) memset_io(base, 0, resource_size(&imem));
_reloc.base = base; - _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE; + _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
return 0; } diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index ea43dff40a85..6fa739c92beb 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -223,7 +223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) PM8001_EVENT_LOG_SIZE; pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; - for (i = 0; i < PM8001_MAX_INB_NUM; i++) { + for (i = 0; i < pm8001_ha->max_q_num; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = @@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; } - for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { + for (i = 0; i < pm8001_ha->max_q_num; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = @@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) read_outbnd_queue_table(pm8001_ha); /* update main config table ,inbound table and outbound table */ update_main_config_table(pm8001_ha); - for (i = 0; i < PM8001_MAX_INB_NUM; i++) + for (i = 0; i < pm8001_ha->max_q_num; i++) update_inbnd_queue_table(pm8001_ha, i); - for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) + for (i = 0; i < pm8001_ha->max_q_num; i++) update_outbnd_queue_table(pm8001_ha, i); /* 8081 controller donot require these operations */ if (deviceid != 0x8081 && deviceid != 0x0042) { diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 16e1bd1aa49d..e53a3f89e863 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -6363,37 +6363,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, DECLARE_COMPLETION_ONSTACK(wait); struct request *req; unsigned long flags; - int free_slot, task_tag, err; + int task_tag, err;
/* - * Get free slot, sleep if slots are unavailable. - * Even though we use wait_event() which sleeps indefinitely, - * the maximum wait time is bounded by %TM_CMD_TIMEOUT. + * blk_get_request() is used here only to get a free tag. */ req = blk_get_request(q, REQ_OP_DRV_OUT, 0); if (IS_ERR(req)) return PTR_ERR(req);
req->end_io_data = &wait; - free_slot = req->tag; - WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs); ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags); - task_tag = hba->nutrs + free_slot; + blk_mq_start_request(req);
+ task_tag = req->tag; treq->req_header.dword_0 |= cpu_to_be32(task_tag);
- memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq)); - ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function); + memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); + ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
/* send command to the controller */ - __set_bit(free_slot, &hba->outstanding_tasks); + __set_bit(task_tag, &hba->outstanding_tasks);
/* Make sure descriptors are ready before ringing the task doorbell */ wmb();
- ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb();
@@ -6413,24 +6410,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); - if (ufshcd_clear_tm_cmd(hba, free_slot)) - dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n", - __func__, free_slot); + if (ufshcd_clear_tm_cmd(hba, task_tag)) + dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", + __func__, task_tag); err = -ETIMEDOUT; } else { err = 0; - memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); + memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); }
spin_lock_irqsave(hba->host->host_lock, flags); - __clear_bit(free_slot, &hba->outstanding_tasks); + __clear_bit(task_tag, &hba->outstanding_tasks); spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba); blk_put_request(req);
- ufshcd_release(hba); return err; }
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index a1b9be1d105a..fde4edd83c14 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -186,7 +186,7 @@ struct qm_eqcr_entry { __be32 tag; struct qm_fd fd; u8 __reserved3[32]; -} __packed; +} __packed __aligned(8); #define QM_EQCR_VERB_VBIT 0x80 #define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ #define QM_EQCR_VERB_CMD_ENQUEUE 0x01 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 518fac4864cf..a237f1cf9bd6 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
target_get_sess_cmd(&cmd->se_cmd, true);
+ cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb); if (cmd->sense_reason) { if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { @@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (cmd->sense_reason) goto attach_cmd;
- /* only used for printks or comparing with ->ref_task_tag */ - cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd); if (cmd->sense_reason) goto attach_cmd; diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 620bcf586ee2..c44fad2b9fbb 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) ret = tb_retimer_nvm_add(rt); if (ret) { dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret); - device_del(&rt->dev); + device_unregister(&rt->dev); return ret; }
@@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index) */ int tb_retimer_scan(struct tb_port *port) { - u32 status[TB_MAX_RETIMER_INDEX] = {}; + u32 status[TB_MAX_RETIMER_INDEX + 1] = {}; int ret, i, last_idx = 0;
if (!port->cap_usb4) diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 8f1de1fbbeed..d8d3892e5a69 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
dev_info(dev, "stub up\n");
+ mutex_lock(&sdev->ud.sysfs_lock); spin_lock_irq(&sdev->ud.lock);
if (sdev->ud.status != SDEV_ST_AVAILABLE) { @@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); if (IS_ERR(tcp_rx)) { sockfd_put(socket); - return -EINVAL; + goto unlock_mutex; } tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); if (IS_ERR(tcp_tx)) { kthread_stop(tcp_rx); sockfd_put(socket); - return -EINVAL; + goto unlock_mutex; }
/* get task structs now */ @@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a wake_up_process(sdev->ud.tcp_rx); wake_up_process(sdev->ud.tcp_tx);
+ mutex_unlock(&sdev->ud.sysfs_lock); + } else { dev_info(dev, "stub down\n");
@@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a spin_unlock_irq(&sdev->ud.lock);
usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN); + mutex_unlock(&sdev->ud.sysfs_lock); }
return count; @@ -130,6 +134,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a sockfd_put(socket); err: spin_unlock_irq(&sdev->ud.lock); +unlock_mutex: + mutex_unlock(&sdev->ud.sysfs_lock); return -EINVAL; } static DEVICE_ATTR_WO(usbip_sockfd); @@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) sdev->ud.side = USBIP_STUB; sdev->ud.status = SDEV_ST_AVAILABLE; spin_lock_init(&sdev->ud.lock); + mutex_init(&sdev->ud.sysfs_lock); sdev->ud.tcp_socket = NULL; sdev->ud.sockfd = -1;
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 8be857a4fa13..a7e6ce96f62c 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -263,6 +263,9 @@ struct usbip_device { /* lock for status */ spinlock_t lock;
+ /* mutex for synchronizing sysfs store paths */ + struct mutex sysfs_lock; + int sockfd; struct socket *tcp_socket;
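For illustration only (not part of the patch): the usbip hunks above and below add a per-device sysfs_lock mutex. Every sysfs store handler (stub_dev.c, vhci_sysfs.c, vudc_sysfs.c) now holds it for the whole attach or detach operation, and event_handler() takes it around the shutdown/reset/unusable callbacks, so a sysfs-triggered attach can no longer race with a concurrent shutdown of the same usbip_device. A minimal sketch of that pattern, using a hypothetical store helper rather than any of the drivers' real functions:

static ssize_t example_sockfd_store(struct usbip_device *ud, int sockfd)
{
        ssize_t ret = -EINVAL;

        /* serializes against event_handler(), which now takes the same
         * mutex around ud->eh_ops.shutdown()/reset()/unusable() */
        mutex_lock(&ud->sysfs_lock);

        spin_lock_irq(&ud->lock);
        if (ud->status != SDEV_ST_AVAILABLE) {
                spin_unlock_irq(&ud->lock);
                goto unlock_mutex;
        }
        spin_unlock_irq(&ud->lock);

        /* ... sockfd_lookup(sockfd, ...), create rx/tx threads, then
         * update ud->status under ud->lock ... */
        ret = 0;

unlock_mutex:
        mutex_unlock(&ud->sysfs_lock);
        return ret;
}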
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index 5d88917c9631..086ca76dd053 100644 --- a/drivers/usb/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c @@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work) while ((ud = get_event()) != NULL) { usbip_dbg_eh("pending event %lx\n", ud->event);
+ mutex_lock(&ud->sysfs_lock); /* * NOTE: shutdown must come first. * Shutdown the device. @@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work) ud->eh_ops.unusable(ud); unset_event(ud, USBIP_EH_UNUSABLE); } + mutex_unlock(&ud->sysfs_lock);
wake_up(&ud->eh_waitq); } diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index a20a8380ca0c..4ba6bcdaa8e9 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -1101,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev) vdev->ud.side = USBIP_VHCI; vdev->ud.status = VDEV_ST_NULL; spin_lock_init(&vdev->ud.lock); + mutex_init(&vdev->ud.sysfs_lock);
INIT_LIST_HEAD(&vdev->priv_rx); INIT_LIST_HEAD(&vdev->priv_tx); diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index e64ea314930b..ebc7be1d9820 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
usbip_dbg_vhci_sysfs("enter\n");
+ mutex_lock(&vdev->ud.sysfs_lock); + /* lock */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); @@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) /* unlock */ spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); + mutex_unlock(&vdev->ud.sysfs_lock);
return -EINVAL; } @@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
+ mutex_unlock(&vdev->ud.sysfs_lock); + return 0; }
@@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, else vdev = &vhci->vhci_hcd_hs->vdev[rhport];
+ mutex_lock(&vdev->ud.sysfs_lock); + /* Extract socket from fd. */ socket = sockfd_lookup(sockfd, &err); if (!socket) { dev_err(dev, "failed to lookup sock"); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } if (socket->type != SOCK_STREAM) { dev_err(dev, "Expecting SOCK_STREAM - found %d", socket->type); sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; }
/* create threads before locking */ tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); if (IS_ERR(tcp_rx)) { sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; } tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); if (IS_ERR(tcp_tx)) { kthread_stop(tcp_rx); sockfd_put(socket); - return -EINVAL; + err = -EINVAL; + goto unlock_mutex; }
/* get task structs now */ @@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, * Will be retried from userspace * if there's another free port. */ - return -EBUSY; + err = -EBUSY; + goto unlock_mutex; }
dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n", @@ -422,7 +434,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
rh_port_connect(vdev, speed);
+ dev_info(dev, "Device attached\n"); + + mutex_unlock(&vdev->ud.sysfs_lock); + return count; + +unlock_mutex: + mutex_unlock(&vdev->ud.sysfs_lock); + return err; } static DEVICE_ATTR_WO(attach);
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c index c8eeabdd9b56..2bc428f2e261 100644 --- a/drivers/usb/usbip/vudc_dev.c +++ b/drivers/usb/usbip/vudc_dev.c @@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc) init_waitqueue_head(&udc->tx_waitq);
spin_lock_init(&ud->lock); + mutex_init(&ud->sysfs_lock); ud->status = SDEV_ST_AVAILABLE; ud->side = USBIP_VUDC;
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index 7383a543c6d1..f7633ee655a1 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, dev_err(dev, "no device"); return -ENODEV; } + mutex_lock(&udc->ud.sysfs_lock); spin_lock_irqsave(&udc->lock, flags); /* Don't export what we don't have */ if (!udc->driver || !udc->pullup) { @@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev,
wake_up_process(udc->ud.tcp_rx); wake_up_process(udc->ud.tcp_tx); + + mutex_unlock(&udc->ud.sysfs_lock); return count;
} else { @@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, }
spin_unlock_irqrestore(&udc->lock, flags); + mutex_unlock(&udc->ud.sysfs_lock);
return count;
@@ -216,6 +220,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, spin_unlock_irq(&udc->ud.lock); unlock: spin_unlock_irqrestore(&udc->lock, flags); + mutex_unlock(&udc->ud.sysfs_lock);
return ret; } diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 08f742fd2409..b6cc53ba980c 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -4,9 +4,13 @@ #ifndef __MLX5_VDPA_H__ #define __MLX5_VDPA_H__
+#include <linux/etherdevice.h> +#include <linux/if_vlan.h> #include <linux/vdpa.h> #include <linux/mlx5/driver.h>
+#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) + struct mlx5_vdpa_direct_mr { u64 start; u64 end; diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 25fd971be63f..ac6be2d722bb 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, - !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1)); + !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); @@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m return; } mvq->avail_idx = attr.available_index; + mvq->used_idx = attr.used_index; }
static void suspend_vqs(struct mlx5_vdpa_net *ndev) @@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx, return -EINVAL; }
+ mvq->used_idx = state->avail_index; mvq->avail_idx = state->avail_index; return 0; } @@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa * that cares about emulating the index after vq is stopped. */ if (!mvq->initialized) { - state->avail_index = mvq->avail_idx; + /* Firmware returns a wrong value for the available index. + * Since both values should be identical, we take the value of + * used_idx which is reported correctly. + */ + state->avail_index = mvq->used_idx; return 0; }
@@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); return err; } - state->avail_index = attr.available_index; + state->avail_index = attr.used_index; return 0; }
@@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) } }
-static void clear_virtqueues(struct mlx5_vdpa_net *ndev) -{ - int i; - - for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { - ndev->vqs[i].avail_idx = 0; - ndev->vqs[i].used_idx = 0; - } -} - /* TODO: cross-endian support */ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) { return virtio_legacy_is_little_endian() || - (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1)); + (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); }
static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val) @@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) if (!status) { mlx5_vdpa_info(mvdev, "performing device reset\n"); teardown_driver(ndev); - clear_virtqueues(ndev); mlx5_vdpa_destroy_mr(&ndev->mvdev); ndev->mvdev.status = 0; ndev->mvdev.mlx_features = 0; @@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = { .free = mlx5_vdpa_free, };
+static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu) +{ + u16 hw_mtu; + int err; + + err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); + if (err) + return err; + + *mtu = hw_mtu - MLX5V_ETH_HARD_MTU; + return 0; +} + static int alloc_resources(struct mlx5_vdpa_net *ndev) { struct mlx5_vdpa_net_resources *res = &ndev->res; @@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev, init_mvqs(ndev); mutex_init(&ndev->reslock); config = &ndev->config; - err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu); + err = query_mtu(mdev, &ndev->mtu); if (err) goto err_mtu;
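For illustration only: the new query_mtu() helper converts the MTU reported by mlx5_query_nic_vport_mtu(), which includes L2 overhead, into the payload MTU exposed to the virtio-net device by subtracting MLX5V_ETH_HARD_MTU. With the usual values from if_ether.h/if_vlan.h that overhead is 14 + 4 + 4 = 22 bytes, so a 1522-byte hardware MTU ends up advertised as the familiar 1500. A tiny standalone check of the arithmetic:

#include <stdio.h>

/* same values the kernel defines in if_ether.h / if_vlan.h */
#define ETH_HLEN        14      /* dst MAC + src MAC + ethertype */
#define VLAN_HLEN       4       /* 802.1Q tag */
#define ETH_FCS_LEN     4       /* frame check sequence */

#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

int main(void)
{
        unsigned short hw_mtu = 1522;   /* example firmware-reported value */

        printf("hard header overhead: %d bytes\n", MLX5V_ETH_HARD_MTU);
        printf("advertised MTU: %d\n", hw_mtu - MLX5V_ETH_HARD_MTU);
        return 0;       /* prints 22 and 1500 */
}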
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index d9148609bd09..1664edcdffd1 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -109,7 +109,7 @@ struct irq_info { unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */ unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ u64 eoi_time; /* Time in jiffies when to EOI. */ - spinlock_t lock; + raw_spinlock_t lock;
union { unsigned short virq; @@ -310,7 +310,7 @@ static int xen_irq_info_common_setup(struct irq_info *info, info->evtchn = evtchn; info->cpu = cpu; info->mask_reason = EVT_MASK_REASON_EXPLICIT; - spin_lock_init(&info->lock); + raw_spin_lock_init(&info->lock);
ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) @@ -463,28 +463,28 @@ static void do_mask(struct irq_info *info, u8 reason) { unsigned long flags;
- spin_lock_irqsave(&info->lock, flags); + raw_spin_lock_irqsave(&info->lock, flags);
if (!info->mask_reason) mask_evtchn(info->evtchn);
info->mask_reason |= reason;
- spin_unlock_irqrestore(&info->lock, flags); + raw_spin_unlock_irqrestore(&info->lock, flags); }
static void do_unmask(struct irq_info *info, u8 reason) { unsigned long flags;
- spin_lock_irqsave(&info->lock, flags); + raw_spin_lock_irqsave(&info->lock, flags);
info->mask_reason &= ~reason;
if (!info->mask_reason) unmask_evtchn(info->evtchn);
- spin_unlock_irqrestore(&info->lock, flags); + raw_spin_unlock_irqrestore(&info->lock, flags); }
#ifdef CONFIG_X86 diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index fe03cbdae959..bf52e9326ebe 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -18,6 +18,7 @@ config CIFS select CRYPTO_AES select CRYPTO_LIB_DES select KEYS + select DNS_RESOLVER help This is the client VFS module for the SMB3 family of NAS protocols, (including support for the most recent, most secure dialect SMB3.1.1) @@ -112,7 +113,6 @@ config CIFS_WEAK_PW_HASH config CIFS_UPCALL bool "Kerberos/SPNEGO advanced session setup" depends on CIFS - select DNS_RESOLVER help Enables an upcall mechanism for CIFS which accesses userspace helper utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets @@ -179,7 +179,6 @@ config CIFS_DEBUG_DUMP_KEYS config CIFS_DFS_UPCALL bool "DFS feature support" depends on CIFS - select DNS_RESOLVER help Distributed File System (DFS) support is used to access shares transparently in an enterprise name space, even if the share diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 5213b20843b5..3ee3b7de4ded 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile @@ -10,13 +10,14 @@ cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \ cifs_unicode.o nterr.o cifsencrypt.o \ readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \ smb2ops.o smb2maperror.o smb2transport.o \ - smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o + smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \ + dns_resolve.o
cifs-$(CONFIG_CIFS_XATTR) += xattr.o
cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
-cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o dfs_cache.o +cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8a6a1772590b..8fc877fb369e 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -475,7 +475,8 @@ static int cifs_show_devname(struct seq_file *m, struct dentry *root) seq_puts(m, "none"); else { convert_delimiter(devname, '/'); - seq_puts(m, devname); + /* escape all spaces in share names */ + seq_escape(m, devname, " \t"); kfree(devname); } return 0; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 70d0f0388af4..2b72b8893aff 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -87,7 +87,6 @@ static void cifs_prune_tlinks(struct work_struct *work); * * This should be called with server->srv_mutex held. */ -#ifdef CONFIG_CIFS_DFS_UPCALL static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) { int rc; @@ -124,6 +123,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) return !rc ? -1 : 0; }
+#ifdef CONFIG_CIFS_DFS_UPCALL /* These functions must be called with server->srv_mutex held */ static void reconn_set_next_dfs_target(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb, @@ -321,14 +321,29 @@ cifs_reconnect(struct TCP_Server_Info *server) #endif
#ifdef CONFIG_CIFS_DFS_UPCALL + if (cifs_sb && cifs_sb->origin_fullpath) /* * Set up next DFS target server (if any) for reconnect. If DFS * feature is disabled, then we will retry last server we * connected to before. */ reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it); + else { +#endif + /* + * Resolve the hostname again to make sure that IP address is up-to-date. + */ + rc = reconn_set_ipaddr_from_hostname(server); + if (rc) { + cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", + __func__, rc); + } + +#ifdef CONFIG_CIFS_DFS_UPCALL + } #endif
+ #ifdef CONFIG_CIFS_SWN_UPCALL } #endif diff --git a/fs/direct-io.c b/fs/direct-io.c index d53fa92a1ab6..c64d4eb38995 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -810,6 +810,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, struct buffer_head *map_bh) { int ret = 0; + int boundary = sdio->boundary; /* dio_send_cur_page may clear it */
if (dio->op == REQ_OP_WRITE) { /* @@ -848,10 +849,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits; out: /* - * If sdio->boundary then we want to schedule the IO now to + * If boundary then we want to schedule the IO now to * avoid metadata seeks. */ - if (sdio->boundary) { + if (boundary) { ret = dio_send_cur_page(dio, sdio, map_bh); if (sdio->bio) dio_bio_submit(dio, sdio); diff --git a/fs/file.c b/fs/file.c index f3a4bac2cbe9..f633348029a5 100644 --- a/fs/file.c +++ b/fs/file.c @@ -629,17 +629,30 @@ int close_fd(unsigned fd) } EXPORT_SYMBOL(close_fd); /* for ksys_close() */
+/** + * last_fd - return last valid index into fd table + * @cur_fds: files struct + * + * Context: Either rcu read lock or files_lock must be held. + * + * Returns: Last valid index into fdtable. + */ +static inline unsigned last_fd(struct fdtable *fdt) +{ + return fdt->max_fds - 1; +} + static inline void __range_cloexec(struct files_struct *cur_fds, unsigned int fd, unsigned int max_fd) { struct fdtable *fdt;
- if (fd > max_fd) - return; - + /* make sure we're using the correct maximum value */ spin_lock(&cur_fds->file_lock); fdt = files_fdtable(cur_fds); - bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); + max_fd = min(last_fd(fdt), max_fd); + if (fd <= max_fd) + bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); spin_unlock(&cur_fds->file_lock); }
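For illustration only: the __range_cloexec() fix above clamps the caller-supplied upper bound to the last valid index of the current fdtable before touching the close_on_exec bitmap. That matters because close_range() is commonly invoked with an effectively unbounded max_fd, as in the sketch below (it assumes UAPI headers new enough to provide SYS_close_range and CLOSE_RANGE_CLOEXEC):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/close_range.h>  /* CLOSE_RANGE_CLOEXEC */

int main(void)
{
        /* mark every descriptor from 3 upward close-on-exec; the kernel,
         * not the caller, has to bound the walk at the end of the fd
         * table, which is exactly the clamping the patch adds */
        if (syscall(SYS_close_range, 3U, ~0U, CLOSE_RANGE_CLOEXEC) != 0)
                perror("close_range");
        return 0;
}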
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index aea35459d390..07467ca0f71d 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -142,7 +142,7 @@ static char *follow_link(char *link) char *name, *resolved, *end; int n;
- name = __getname(); + name = kmalloc(PATH_MAX, GFP_KERNEL); if (!name) { n = -ENOMEM; goto out_free; @@ -171,12 +171,11 @@ static char *follow_link(char *link) goto out_free; }
- __putname(name); - kfree(link); + kfree(name); return resolved;
out_free: - __putname(name); + kfree(name); return ERR_PTR(n); }
diff --git a/fs/namei.c b/fs/namei.c index dd85e12ac85a..b7c0dcc25bd4 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2330,16 +2330,16 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path while (!(err = link_path_walk(s, nd)) && (s = lookup_last(nd)) != NULL) ; + if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) { + err = handle_lookup_down(nd); + nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please... + } if (!err) err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY) if (!d_can_lookup(nd->path.dentry)) err = -ENOTDIR; - if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) { - err = handle_lookup_down(nd); - nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please... - } if (!err) { *path = nd->path; nd->path.mnt = NULL; diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 3bfb4147895a..ad20403b383f 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2295,7 +2295,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode, struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle = NULL; loff_t end = offset + bytes; - int ret = 0, credits = 0, locked = 0; + int ret = 0, credits = 0;
ocfs2_init_dealloc_ctxt(&dealloc);
@@ -2306,13 +2306,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode, !dwc->dw_orphaned) goto out;
- /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we - * are in that context. */ - if (dwc->dw_writer_pid != task_pid_nr(current)) { - inode_lock(inode); - locked = 1; - } - ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret < 0) { mlog_errno(ret); @@ -2393,8 +2386,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode, if (meta_ac) ocfs2_free_alloc_context(meta_ac); ocfs2_run_deallocs(osb, &dealloc); - if (locked) - inode_unlock(inode); ocfs2_dio_free_write_ctx(inode, dwc);
return ret; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 85979e2214b3..8880071ee4ee 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1244,22 +1244,24 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) goto bail_unlock; } } + down_write(&OCFS2_I(inode)->ip_alloc_sem); handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS + 2 * ocfs2_quota_trans_credits(sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); - goto bail_unlock; + goto bail_unlock_alloc; } status = __dquot_transfer(inode, transfer_to); if (status < 0) goto bail_commit; } else { + down_write(&OCFS2_I(inode)->ip_alloc_sem); handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); - goto bail_unlock; + goto bail_unlock_alloc; } }
@@ -1272,6 +1274,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
bail_commit: ocfs2_commit_trans(osb, handle); +bail_unlock_alloc: + up_write(&OCFS2_I(inode)->ip_alloc_sem); bail_unlock: if (status && inode_locked) { ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 40bad71865ea..532bcbfc4716 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -476,7 +476,6 @@ struct virtchnl_rss_key { u16 vsi_id; u16 key_len; u8 key[1]; /* RSS hash key, packed bytes */ - u8 pad[1]; };
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); @@ -485,7 +484,6 @@ struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; u8 lut[1]; /* RSS lookup table */ - u8 pad[1]; };
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 442c0160caab..6370ba10f1fd 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -437,11 +437,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_60[0x18]; u8 log_max_ft_num[0x8];
- u8 reserved_at_80[0x18]; + u8 reserved_at_80[0x10]; + u8 log_max_flow_counter[0x8]; u8 log_max_destination[0x8];
- u8 log_max_flow_counter[0x8]; - u8 reserved_at_a8[0x10]; + u8 reserved_at_a0[0x18]; u8 log_max_flow[0x8];
u8 reserved_at_c0[0x40]; @@ -8769,6 +8769,8 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_100g_2x[0x10]; u8 fec_override_admin_50g_1x[0x10]; + + u8 reserved_at_140[0x140]; };
struct mlx5_ifc_ppcnt_reg_bits { @@ -10106,7 +10108,7 @@ struct mlx5_ifc_pbmc_reg_bits {
struct mlx5_ifc_bufferx_reg_bits buffer[10];
- u8 reserved_at_2e0[0x40]; + u8 reserved_at_2e0[0x80]; };
struct mlx5_ifc_qtct_reg_bits { diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index fec0c5ac1c4f..82126d529798 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -349,8 +349,13 @@ static inline void sk_psock_update_proto(struct sock *sk, static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { - sk->sk_prot->unhash = psock->saved_unhash; if (inet_csk_has_ulp(sk)) { + /* TLS does not have an unhash proto in SW cases, but we need + * to ensure we stop using the sock_map unhash routine because + * the associated psock is being removed. So use the original + * unhash handler. + */ + WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash); tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space); } else { sk->sk_write_space = psock->saved_write_space; diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 6b5fcfa1e555..98775d7fa696 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -62,6 +62,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, return -EINVAL; }
+ skb_reset_mac_header(skb); + if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start); u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); diff --git a/include/net/act_api.h b/include/net/act_api.h index 2bf3092ae7ec..086b291e9530 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -170,12 +170,7 @@ void tcf_idr_insert_many(struct tc_action *actions[]); void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, struct tc_action **a, int bind); -int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); - -static inline int tcf_idr_release(struct tc_action *a, bool bind) -{ - return __tcf_idr_release(a, bind, false); -} +int tcf_idr_release(struct tc_action *a, bool bind);
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops); int tcf_unregister_action(struct tc_action_ops *a, @@ -185,7 +180,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int nr_actions, struct tcf_result *res); int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - struct tc_action *actions[], size_t *attr_size, + struct tc_action *actions[], int init_res[], size_t *attr_size, bool rtnl_held, struct netlink_ext_ack *extack); struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla, bool rtnl_held, @@ -193,7 +188,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla, struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - struct tc_action_ops *ops, bool rtnl_held, + struct tc_action_ops *a_o, int *init_res, + bool rtnl_held, struct netlink_ext_ack *extack); int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, int ref, bool terse); diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 59f45b1e9dac..b59d73d529ba 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -72,7 +72,9 @@ struct netns_xfrm { #if IS_ENABLED(CONFIG_IPV6) struct dst_ops xfrm6_dst_ops; #endif - spinlock_t xfrm_state_lock; + spinlock_t xfrm_state_lock; + seqcount_t xfrm_state_hash_generation; + spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; }; diff --git a/include/net/red.h b/include/net/red.h index 9e6647c4ccd1..cc9f6b0d7f1e 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -171,9 +171,9 @@ static inline void red_set_vars(struct red_vars *v) static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log, u8 *stab) { - if (fls(qth_min) + Wlog > 32) + if (fls(qth_min) + Wlog >= 32) return false; - if (fls(qth_max) + Wlog > 32) + if (fls(qth_max) + Wlog >= 32) return false; if (Scell_log >= 32) return false; diff --git a/include/net/sock.h b/include/net/sock.h index 129d200bccb4..6f4408410462 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2215,6 +2215,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) sk_mem_charge(sk, skb->truesize); }
+static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk) +{ + if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { + skb_orphan(skb); + skb->destructor = sock_efree; + skb->sk = sk; + } +} + void sk_reset_timer(struct sock *sk, struct timer_list *timer, unsigned long expires);
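skb_set_owner_sk_safe() above is a small helper that assigns an skb to a socket only when it can still take a reference (refcount_inc_not_zero()), using sock_efree() as the destructor so the reference is dropped when the skb is freed. A hypothetical caller, purely for illustration:

static void deliver_to_sock(struct sk_buff *skb, struct sock *sk)
{
        /* takes a reference only if sk_refcnt was not already zero;
         * otherwise the skb is simply left unowned */
        skb_set_owner_sk_safe(skb, sk);

        /* ... queue the skb for later processing; sock_efree() will put
         * the socket reference when the skb is eventually freed ... */
}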
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index b2a06f10b62c..c58a6d4eb610 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1097,7 +1097,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir, return __xfrm_policy_check(sk, ndir, skb, family);
return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) || - (skb_dst(skb)->flags & DST_NOPOLICY) || + (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || __xfrm_policy_check(sk, ndir, skb, family); }
@@ -1557,7 +1557,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb, int xfrm_trans_queue(struct sk_buff *skb, int (*finish)(struct net *, struct sock *, struct sk_buff *)); -int xfrm_output_resume(struct sk_buff *skb, int err); +int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err); int xfrm_output(struct sock *sk, struct sk_buff *skb);
#if IS_ENABLED(CONFIG_NET_PKTGEN) diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index f75238ac6dce..c7535352fef6 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -113,7 +113,7 @@ struct can_frame { */ __u8 len; __u8 can_dlc; /* deprecated */ - }; + } __attribute__((packed)); /* disable padding added in some ABIs */ __u8 __pad; /* padding */ __u8 __res0; /* reserved / padding */ __u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */ diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h index 03e8af87b364..9b77cfc42efa 100644 --- a/include/uapi/linux/rfkill.h +++ b/include/uapi/linux/rfkill.h @@ -86,34 +86,90 @@ enum rfkill_hard_block_reasons { * @op: operation code * @hard: hard state (0/1) * @soft: soft state (0/1) + * + * Structure used for userspace communication on /dev/rfkill, + * used for events from the kernel and control to the kernel. + */ +struct rfkill_event { + __u32 idx; + __u8 type; + __u8 op; + __u8 soft; + __u8 hard; +} __attribute__((packed)); + +/** + * struct rfkill_event_ext - events for userspace on /dev/rfkill + * @idx: index of dev rfkill + * @type: type of the rfkill struct + * @op: operation code + * @hard: hard state (0/1) + * @soft: soft state (0/1) * @hard_block_reasons: valid if hard is set. One or several reasons from * &enum rfkill_hard_block_reasons. * * Structure used for userspace communication on /dev/rfkill, * used for events from the kernel and control to the kernel. + * + * See the extensibility docs below. */ -struct rfkill_event { +struct rfkill_event_ext { __u32 idx; __u8 type; __u8 op; __u8 soft; __u8 hard; + + /* + * older kernels will accept/send only up to this point, + * and if extended further up to any chunk marked below + */ + __u8 hard_block_reasons; } __attribute__((packed));
-/* - * We are planning to be backward and forward compatible with changes - * to the event struct, by adding new, optional, members at the end. - * When reading an event (whether the kernel from userspace or vice - * versa) we need to accept anything that's at least as large as the - * version 1 event size, but might be able to accept other sizes in - * the future. +/** + * DOC: Extensibility + * + * Originally, we had planned to allow backward and forward compatible + * changes by just adding fields at the end of the structure that are + * then not reported on older kernels on read(), and not written to by + * older kernels on write(), with the kernel reporting the size it did + * accept as the result. + * + * This would have allowed userspace to detect on read() and write() + * which kernel structure version it was dealing with, and if was just + * recompiled it would have gotten the new fields, but obviously not + * accessed them, but things should've continued to work. + * + * Unfortunately, while actually exercising this mechanism to add the + * hard block reasons field, we found that userspace (notably systemd) + * did all kinds of fun things not in line with this scheme: + * + * 1. treat the (expected) short writes as an error; + * 2. ask to read sizeof(struct rfkill_event) but then compare the + * actual return value to RFKILL_EVENT_SIZE_V1 and treat any + * mismatch as an error. + * + * As a consequence, just recompiling with a new struct version caused + * things to no longer work correctly on old and new kernels. + * + * Hence, we've rolled back &struct rfkill_event to the original version + * and added &struct rfkill_event_ext. This effectively reverts to the + * old behaviour for all userspace, unless it explicitly opts in to the + * rules outlined here by using the new &struct rfkill_event_ext. + * + * Userspace using &struct rfkill_event_ext must adhere to the following + * rules * - * One exception is the kernel -- we already have two event sizes in - * that we've made the 'hard' member optional since our only option - * is to ignore it anyway. + * 1. accept short writes, optionally using them to detect that it's + * running on an older kernel; + * 2. accept short reads, knowing that this means it's running on an + * older kernel; + * 3. treat reads that are as long as requested as acceptable, not + * checking against RFKILL_EVENT_SIZE_V1 or such. */ -#define RFKILL_EVENT_SIZE_V1 8 +#define RFKILL_EVENT_SIZE_V1 sizeof(struct rfkill_event)
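To make the rules above concrete, a minimal userspace reader of /dev/rfkill using the new struct rfkill_event_ext could look like the sketch below (illustration only; it assumes UAPI headers that already contain rfkill_event_ext):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
        struct rfkill_event_ext ev = { 0 };
        int fd = open("/dev/rfkill", O_RDONLY);
        ssize_t n;

        if (fd < 0)
                return 1;

        n = read(fd, &ev, sizeof(ev));
        if (n < 0) {
                close(fd);
                return 1;
        }

        /* rule 2: a short read just means an older kernel; the fields it
         * did fill in are still valid */
        printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
               ev.idx, ev.type, ev.op, ev.soft, ev.hard);

        /* rule 3: a full-length read is fine as-is, no comparison against
         * RFKILL_EVENT_SIZE_V1; only then is the extended field valid */
        if ((size_t)n >= sizeof(ev))
                printf("hard_block_reasons=0x%x\n", ev.hard_block_reasons);

        close(fd);
        return 0;
}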
/* ioctl for turning off rfkill-input (if present) */ #define RFKILL_IOC_MAGIC 'R' diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index dd4b7fd60ee7..6b14b4c4068c 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -546,7 +546,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags) else if (type == BPF_TYPE_MAP) ret = bpf_map_new_fd(raw, f_flags); else if (type == BPF_TYPE_LINK) - ret = bpf_link_new_fd(raw); + ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw); else return -ENOENT;
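With the bpf_obj_get_user() change above, a pinned BPF link has to be retrieved with the default read-write access; passing BPF_F_RDONLY or BPF_F_WRONLY in file_flags now gets -EINVAL (pinned maps and programs are not affected). A bare-syscall sketch of that retrieval; the pin path is made up for the example:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int bpf_obj_get_flags(const char *path, __u32 file_flags)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.pathname = (__u64)(unsigned long)path;
        attr.file_flags = file_flags;   /* must be 0 (O_RDWR) for links */

        return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}

int main(void)
{
        int fd = bpf_obj_get_flags("/sys/fs/bpf/my_link", 0);

        if (fd < 0)
                perror("BPF_OBJ_GET");
        else
                close(fd);
        return 0;
}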
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index bfafbf115bf3..e274a3319431 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -652,9 +652,17 @@ const struct bpf_func_proto bpf_get_stack_proto = { BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, u32, size, u64, flags) { - struct pt_regs *regs = task_pt_regs(task); + struct pt_regs *regs; + long res;
- return __bpf_get_stack(regs, task, NULL, buf, size, flags); + if (!try_get_task_stack(task)) + return -EFAULT; + + regs = task_pt_regs(task); + res = __bpf_get_stack(regs, task, NULL, buf, size, flags); + put_task_stack(task); + + return res; }
BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5b233e911c2c..36b81975d9cd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -11570,6 +11570,11 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) u32 btf_id, member_idx; const char *mname;
+ if (!prog->gpl_compatible) { + verbose(env, "struct ops programs must have a GPL compatible license\n"); + return -EINVAL; + } + btf_id = prog->aux->attach_btf_id; st_ops = bpf_struct_ops_find(btf_id); if (!st_ops) { diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c index 8743150db2ac..c466c7fbdece 100644 --- a/kernel/gcov/clang.c +++ b/kernel/gcov/clang.c @@ -70,7 +70,9 @@ struct gcov_fn_info {
u32 ident; u32 checksum; +#if CONFIG_CLANG_VERSION < 110000 u8 use_extra_checksum; +#endif u32 cfg_checksum;
u32 num_counters; @@ -145,10 +147,8 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
list_add_tail(&info->head, &current_info->functions); } -EXPORT_SYMBOL(llvm_gcda_emit_function); #else -void llvm_gcda_emit_function(u32 ident, u32 func_checksum, - u8 use_extra_checksum, u32 cfg_checksum) +void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum) { struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -158,12 +158,11 @@ void llvm_gcda_emit_function(u32 ident, u32 func_checksum, INIT_LIST_HEAD(&info->head); info->ident = ident; info->checksum = func_checksum; - info->use_extra_checksum = use_extra_checksum; info->cfg_checksum = cfg_checksum; list_add_tail(&info->head, &current_info->functions); } -EXPORT_SYMBOL(llvm_gcda_emit_function); #endif +EXPORT_SYMBOL(llvm_gcda_emit_function);
void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters) { @@ -293,11 +292,16 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2) !list_is_last(&fn_ptr2->head, &info2->functions)) { if (fn_ptr1->checksum != fn_ptr2->checksum) return false; +#if CONFIG_CLANG_VERSION < 110000 if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum) return false; if (fn_ptr1->use_extra_checksum && fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum) return false; +#else + if (fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum) + return false; +#endif fn_ptr1 = list_next_entry(fn_ptr1, head); fn_ptr2 = list_next_entry(fn_ptr2, head); } @@ -529,17 +533,22 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
list_for_each_entry(fi_ptr, &info->functions, head) { u32 i; - u32 len = 2; - - if (fi_ptr->use_extra_checksum) - len++;
pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION); - pos += store_gcov_u32(buffer, pos, len); +#if CONFIG_CLANG_VERSION < 110000 + pos += store_gcov_u32(buffer, pos, + fi_ptr->use_extra_checksum ? 3 : 2); +#else + pos += store_gcov_u32(buffer, pos, 3); +#endif pos += store_gcov_u32(buffer, pos, fi_ptr->ident); pos += store_gcov_u32(buffer, pos, fi_ptr->checksum); +#if CONFIG_CLANG_VERSION < 110000 if (fi_ptr->use_extra_checksum) pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum); +#else + pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum); +#endif
pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE); pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 780012eb2f3f..eead7efbe7e5 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -705,7 +705,7 @@ static void print_lock_name(struct lock_class *class)
printk(KERN_CONT " ("); __print_lock_name(class); - printk(KERN_CONT "){%s}-{%hd:%hd}", usage, + printk(KERN_CONT "){%s}-{%d:%d}", usage, class->wait_type_outer ?: class->wait_type_inner, class->wait_type_inner); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 894bb885b40b..6326a872510b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, */ lockdep_assert_irqs_disabled();
- debug_work_activate(work);
/* if draining, only works from the same workqueue are allowed */ if (unlikely(wq->flags & __WQ_DRAINING) && @@ -1494,6 +1493,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, worklist = &pwq->delayed_works; }
+ debug_work_activate(work); insert_work(pwq, work, worklist, work_flags);
out: diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h index 18b768ac7dca..095d7eaa0db4 100644 --- a/mm/percpu-internal.h +++ b/mm/percpu-internal.h @@ -87,7 +87,7 @@ extern spinlock_t pcpu_lock;
extern struct list_head *pcpu_chunk_lists; extern int pcpu_nr_slots; -extern int pcpu_nr_empty_pop_pages; +extern int pcpu_nr_empty_pop_pages[];
extern struct pcpu_chunk *pcpu_first_chunk; extern struct pcpu_chunk *pcpu_reserved_chunk; diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c index c8400a2adbc2..f6026dbcdf6b 100644 --- a/mm/percpu-stats.c +++ b/mm/percpu-stats.c @@ -145,6 +145,7 @@ static int percpu_stats_show(struct seq_file *m, void *v) int slot, max_nr_alloc; int *buffer; enum pcpu_chunk_type type; + int nr_empty_pop_pages;
alloc_buffer: spin_lock_irq(&pcpu_lock); @@ -165,7 +166,11 @@ static int percpu_stats_show(struct seq_file *m, void *v) goto alloc_buffer; }
-#define PL(X) \ + nr_empty_pop_pages = 0; + for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) + nr_empty_pop_pages += pcpu_nr_empty_pop_pages[type]; + +#define PL(X) \ seq_printf(m, " %-20s: %12lld\n", #X, (long long int)pcpu_stats_ai.X)
seq_printf(m, @@ -196,7 +201,7 @@ static int percpu_stats_show(struct seq_file *m, void *v) PU(nr_max_chunks); PU(min_alloc_size); PU(max_alloc_size); - P("empty_pop_pages", pcpu_nr_empty_pop_pages); + P("empty_pop_pages", nr_empty_pop_pages); seq_putc(m, '\n');
#undef PU diff --git a/mm/percpu.c b/mm/percpu.c index ad7a37ee74ef..e12ab708fe15 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -172,10 +172,10 @@ struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */ static LIST_HEAD(pcpu_map_extend_chunks);
/* - * The number of empty populated pages, protected by pcpu_lock. The - * reserved chunk doesn't contribute to the count. + * The number of empty populated pages by chunk type, protected by pcpu_lock. + * The reserved chunk doesn't contribute to the count. */ -int pcpu_nr_empty_pop_pages; +int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];
/* * The number of populated pages in use by the allocator, protected by @@ -555,7 +555,7 @@ static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr) { chunk->nr_empty_pop_pages += nr; if (chunk != pcpu_reserved_chunk) - pcpu_nr_empty_pop_pages += nr; + pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr; }
/* @@ -1831,7 +1831,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, mutex_unlock(&pcpu_alloc_mutex); }
- if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) + if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW) pcpu_schedule_balance_work();
/* clear the areas and return address relative to base address */ @@ -1999,7 +1999,7 @@ static void __pcpu_balance_workfn(enum pcpu_chunk_type type) pcpu_atomic_alloc_failed = false; } else { nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - - pcpu_nr_empty_pop_pages, + pcpu_nr_empty_pop_pages[type], 0, PCPU_EMPTY_POP_PAGES_HIGH); }
@@ -2579,7 +2579,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
/* link the first chunk in */ pcpu_first_chunk = chunk; - pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; + pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages; pcpu_chunk_relocate(pcpu_first_chunk, -1);
/* include all regions of the first chunk */ diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index cd09916f97fe..0e32e31872e2 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -890,6 +890,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, hlist_for_each_entry(vlan, &orig_node->vlan_list, list) { tt_vlan->vid = htons(vlan->vid); tt_vlan->crc = htonl(vlan->tt.crc); + tt_vlan->reserved = 0;
tt_vlan++; } @@ -973,6 +974,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
tt_vlan->vid = htons(vlan->vid); tt_vlan->crc = htonl(vlan->tt.crc); + tt_vlan->reserved = 0;
tt_vlan++; } diff --git a/net/can/bcm.c b/net/can/bcm.c index 0e5c37be4a2b..909b9e684e04 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -86,6 +86,8 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp oliver.hartkopp@volkswagen.de"); MODULE_ALIAS("can-proto-2");
+#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex) + /* * easy access to the first 64 bit of can(fd)_frame payload. cp->data is * 64 bit aligned so the offset has to be multiples of 8 which is ensured @@ -1292,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) /* no bound device as default => check msg_name */ DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
- if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex)) + if (msg->msg_namelen < BCM_MIN_NAMELEN) return -EINVAL;
if (addr->can_family != AF_CAN) @@ -1534,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, struct net *net = sock_net(sk); int ret = 0;
- if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex)) + if (len < BCM_MIN_NAMELEN) return -EINVAL;
lock_sock(sk); @@ -1616,8 +1618,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) { - __sockaddr_check_size(sizeof(struct sockaddr_can)); - msg->msg_namelen = sizeof(struct sockaddr_can); + __sockaddr_check_size(BCM_MIN_NAMELEN); + msg->msg_namelen = BCM_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); }
diff --git a/net/can/isotp.c b/net/can/isotp.c index 15ea1234d457..9f94ad3caee9 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -77,6 +77,8 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp socketcan@hartkopp.net"); MODULE_ALIAS("can-proto-6");
+#define ISOTP_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp) + #define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) @@ -986,7 +988,8 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, sock_recv_timestamp(msg, sk, skb);
if (msg->msg_name) { - msg->msg_namelen = sizeof(struct sockaddr_can); + __sockaddr_check_size(ISOTP_MIN_NAMELEN); + msg->msg_namelen = ISOTP_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); }
@@ -1056,7 +1059,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) int notify_enetdown = 0; int do_rx_reg = 1;
- if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)) + if (len < ISOTP_MIN_NAMELEN) return -EINVAL;
/* do not register frame reception for functional addressing */ @@ -1152,13 +1155,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer) if (peer) return -EOPNOTSUPP;
- memset(addr, 0, sizeof(*addr)); + memset(addr, 0, ISOTP_MIN_NAMELEN); addr->can_family = AF_CAN; addr->can_ifindex = so->ifindex; addr->can_addr.tp.rx_id = so->rxid; addr->can_addr.tp.tx_id = so->txid;
- return sizeof(*addr); + return ISOTP_MIN_NAMELEN; }
static int isotp_setsockopt(struct socket *sock, int level, int optname, diff --git a/net/can/raw.c b/net/can/raw.c index 6ec8aa1d0da4..95113b0898b2 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -60,6 +60,8 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann urs.thuermann@volkswagen.de"); MODULE_ALIAS("can-proto-1");
+#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex) + #define MASK_ALL 0
/* A raw socket has a list of can_filters attached to it, each receiving @@ -394,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) int err = 0; int notify_enetdown = 0;
- if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex)) + if (len < RAW_MIN_NAMELEN) return -EINVAL; if (addr->can_family != AF_CAN) return -EINVAL; @@ -475,11 +477,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr, if (peer) return -EOPNOTSUPP;
- memset(addr, 0, sizeof(*addr)); + memset(addr, 0, RAW_MIN_NAMELEN); addr->can_family = AF_CAN; addr->can_ifindex = ro->ifindex;
- return sizeof(*addr); + return RAW_MIN_NAMELEN; }
static int raw_setsockopt(struct socket *sock, int level, int optname, @@ -731,7 +733,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
- if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex)) + if (msg->msg_namelen < RAW_MIN_NAMELEN) return -EINVAL;
if (addr->can_family != AF_CAN) @@ -824,8 +826,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) { - __sockaddr_check_size(sizeof(struct sockaddr_can)); - msg->msg_namelen = sizeof(struct sockaddr_can); + __sockaddr_check_size(RAW_MIN_NAMELEN); + msg->msg_namelen = RAW_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); }
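
The three CAN fixes above (bcm, isotp, raw) share one idea: validate and report the smallest address length each protocol actually needs, computed with the CAN_REQUIRED_SIZE() pattern, instead of using the full sizeof(struct sockaddr_can). A minimal sketch of that pattern follows; the struct below is a simplified stand-in rather than the real sockaddr_can layout, and the macro only mirrors the offsetof-plus-sizeof shape.

#include <stdio.h>
#include <stddef.h>

/*
 * Simplified stand-in for struct sockaddr_can; the real layout lives in
 * <linux/can.h> and has more members in the address union.
 */
struct fake_sockaddr_can {
    unsigned short can_family;
    int can_ifindex;
    union {
        struct { unsigned int rx_id, tx_id; } tp;
        struct { unsigned int addr; } other;
    } can_addr;
};

/* Same shape as the kernel's CAN_REQUIRED_SIZE() helper. */
#define REQUIRED_SIZE(type, member) \
    (offsetof(type, member) + sizeof(((type *)0)->member))

/* bcm/raw only need the ifindex; isotp needs the tp addressing too. */
#define MIN_NAMELEN_IFINDEX REQUIRED_SIZE(struct fake_sockaddr_can, can_ifindex)
#define MIN_NAMELEN_TP      REQUIRED_SIZE(struct fake_sockaddr_can, can_addr.tp)

static int check_namelen(size_t namelen, size_t required)
{
    return namelen >= required ? 0 : -1;  /* -EINVAL in the kernel */
}

int main(void)
{
    printf("sizeof(fake sockaddr_can) = %zu\n", sizeof(struct fake_sockaddr_can));
    printf("ifindex-only minimum      = %zu\n", (size_t)MIN_NAMELEN_IFINDEX);
    printf("tp-addressing minimum     = %zu\n", (size_t)MIN_NAMELEN_TP);

    /* A short (truncated) address is rejected, a full one accepted. */
    printf("namelen 4  -> %d\n", check_namelen(4, MIN_NAMELEN_IFINDEX));
    printf("namelen %zu -> %d\n", sizeof(struct fake_sockaddr_can),
           check_namelen(sizeof(struct fake_sockaddr_can), MIN_NAMELEN_IFINDEX));
    return 0;
}

The recvmsg() hunks apply the same constant on the reporting side, so msg_namelen never claims more bytes than the protocol actually filled in.
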
diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 25cdbb20f3a0..923a1d0f84ca 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -488,6 +488,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb if (unlikely(!msg)) return -EAGAIN; sk_msg_init(msg); + skb_set_owner_r(skb, sk); return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); }
@@ -791,7 +792,6 @@ static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int { switch (verdict) { case __SK_REDIRECT: - skb_set_owner_r(skb, sk); sk_psock_skb_redirect(skb); break; case __SK_PASS: @@ -809,10 +809,6 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb) rcu_read_lock(); prog = READ_ONCE(psock->progs.skb_verdict); if (likely(prog)) { - /* We skip full set_owner_r here because if we do a SK_PASS - * or SK_DROP we can skip skb memory accounting and use the - * TLS context. - */ skb->sk = psock->sk; tcp_skb_bpf_redirect_clear(skb); ret = sk_psock_bpf_run(psock, prog, skb); @@ -881,12 +877,13 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) kfree_skb(skb); goto out; } - skb_set_owner_r(skb, sk); prog = READ_ONCE(psock->progs.skb_verdict); if (likely(prog)) { + skb->sk = sk; tcp_skb_bpf_redirect_clear(skb); ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); + skb->sk = NULL; } sk_psock_verdict_apply(psock, skb, ret); out: @@ -957,12 +954,13 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb, kfree_skb(skb); goto out; } - skb_set_owner_r(skb, sk); prog = READ_ONCE(psock->progs.skb_verdict); if (likely(prog)) { + skb->sk = sk; tcp_skb_bpf_redirect_clear(skb); ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); + skb->sk = NULL; } sk_psock_verdict_apply(psock, skb, ret); out: diff --git a/net/core/sock.c b/net/core/sock.c index bbcd4b97eddd..01a680c5c7ae 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2118,16 +2118,10 @@ void skb_orphan_partial(struct sk_buff *skb) if (skb_is_tcp_pure_ack(skb)) return;
- if (can_skb_orphan_partial(skb)) { - struct sock *sk = skb->sk; - - if (refcount_inc_not_zero(&sk->sk_refcnt)) { - WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)); - skb->destructor = sock_efree; - } - } else { + if (can_skb_orphan_partial(skb)) + skb_set_owner_sk_safe(skb, skb->sk); + else skb_orphan(skb); - } } EXPORT_SYMBOL(skb_orphan_partial);
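
skb_orphan_partial() above drops its open-coded "take a socket reference only if sk_refcnt is still non-zero" sequence in favour of the skb_set_owner_sk_safe() helper. The sketch below models just that take-a-reference-if-alive idea with C11 atomics; the types, field names and main() are invented and it is not the kernel implementation.

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

struct sock_like {
    atomic_int refcnt;          /* analogous to sk->sk_refcnt */
};

struct buf_like {
    struct sock_like *owner;    /* analogous to skb->sk */
};

/* Take a reference only if the object is still live (refcount > 0). */
static bool get_if_live(struct sock_like *sk)
{
    int old = atomic_load(&sk->refcnt);

    while (old != 0) {
        if (atomic_compare_exchange_weak(&sk->refcnt, &old, old + 1))
            return true;        /* we now hold a reference */
    }
    return false;               /* already being torn down: leave it alone */
}

/* Adopt the buffer only when its prospective owner is still alive. */
static bool set_owner_safe(struct buf_like *b, struct sock_like *sk)
{
    if (!get_if_live(sk))
        return false;
    b->owner = sk;              /* the reference keeps sk valid until release */
    return true;
}

int main(void)
{
    struct sock_like live = { .refcnt = 1 };
    struct sock_like dead = { .refcnt = 0 };
    struct buf_like b1 = { 0 }, b2 = { 0 };

    printf("adopt by live socket: %d (refcnt now %d)\n",
           set_owner_safe(&b1, &live), atomic_load(&live.refcnt));
    printf("adopt by dead socket: %d (owner %p)\n",
           set_owner_safe(&b2, &dead), (void *)b2.owner);
    return 0;
}
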
diff --git a/net/core/xdp.c b/net/core/xdp.c index 3a8c9ab4ecbe..a86bc3660729 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -350,7 +350,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); page = virt_to_head_page(data); - napi_direct &= !xdp_return_frame_no_direct(); + if (napi_direct && xdp_return_frame_no_direct()) + napi_direct = false; page_pool_put_full_page(xa->page_pool, page, napi_direct); rcu_read_unlock(); break; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index a04fd637b4cd..3ada338d7e08 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -533,8 +533,14 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
list_for_each_entry(dp, &dst->ports, list) { err = dsa_port_setup(dp); - if (err) + if (err) { + dsa_port_devlink_teardown(dp); + dp->type = DSA_PORT_TYPE_UNUSED; + err = dsa_port_devlink_setup(dp); + if (err) + goto teardown; continue; + } }
return 0; diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c index 901b7de941ab..e10bfcc07853 100644 --- a/net/ethtool/eee.c +++ b/net/ethtool/eee.c @@ -169,8 +169,8 @@ int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info) ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod); ethnl_update_bool32(&eee.tx_lpi_enabled, tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod); - ethnl_update_bool32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER], - &mod); + ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER], + &mod); ret = 0; if (!mod) goto out_ops; diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index ab953a1a0d6c..6f4c34b6a5d6 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -217,6 +217,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); if (master) { skb->dev = master->dev; + skb_reset_mac_header(skb); hsr_forward_skb(skb, master); } else { atomic_long_inc(&dev->tx_dropped); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index cadfccd7876e..b4e06ae08834 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -528,12 +528,6 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) { struct hsr_frame_info frame;
- if (skb_mac_header(skb) != skb->data) { - WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n", - __FILE__, __LINE__, port->dev->name); - goto out_drop; - } - if (fill_frame_info(&frame, skb, port) < 0) goto out_drop;
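
One easy-to-miss fix further up is in net/ethtool/eee.c: tx_lpi_timer carries a full 32-bit timeout, so updating it through the bool32 helper would treat the attribute as a flag and collapse the stored value. The toy updaters below illustrate that difference in semantics; they are an assumption about the helper behaviour, not the real ethnl_update_* code.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Toy models of the two update styles; the real helpers also read the
 * value from a netlink attribute and skip the update when it is absent.
 */
static void update_bool32(uint32_t *dst, uint32_t attr_val, bool *mod)
{
    bool val = !!attr_val;          /* attribute interpreted as a flag */

    if (!!*dst == val)
        return;
    *dst = val;
    *mod = true;
}

static void update_u32(uint32_t *dst, uint32_t attr_val, bool *mod)
{
    if (*dst == attr_val)           /* attribute interpreted as a value */
        return;
    *dst = attr_val;
    *mod = true;
}

int main(void)
{
    uint32_t timer_a = 0, timer_b = 0;
    bool mod = false;

    update_bool32(&timer_a, 600, &mod);  /* userspace asked for 600 */
    update_u32(&timer_b, 600, &mod);

    printf("bool32 path stored %u, u32 path stored %u\n", timer_a, timer_b);
    return 0;
}
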
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 9c640d670ffe..0c1b0770c59e 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -551,9 +551,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info, desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) { - if (!info->attrs[IEEE802154_ATTR_PAN_ID] && - !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] || - info->attrs[IEEE802154_ATTR_HW_ADDR])) + if (!info->attrs[IEEE802154_ATTR_PAN_ID]) return -EINVAL;
desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]); @@ -562,6 +560,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info, desc->device_addr.mode = IEEE802154_ADDR_SHORT; desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]); } else { + if (!info->attrs[IEEE802154_ATTR_HW_ADDR]) + return -EINVAL; + desc->device_addr.mode = IEEE802154_ADDR_LONG; desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); } diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 7c5a1aa5adb4..d1b6a9665b17 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -820,8 +820,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, goto nla_put_failure;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + goto out; + if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0) goto nla_put_failure; + +out: #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
genlmsg_end(msg, hdr); @@ -1384,6 +1389,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb, u32 changed = 0; int ret;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + if (info->attrs[NL802154_ATTR_SEC_ENABLED]) { u8 enabled;
@@ -1544,7 +1552,8 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info) struct ieee802154_llsec_key_id id = { }; u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
- if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack)) + if (!info->attrs[NL802154_ATTR_SEC_KEY] || + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack)) return -EINVAL;
if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] || @@ -1592,7 +1601,8 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info) struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1]; struct ieee802154_llsec_key_id id;
- if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack)) + if (!info->attrs[NL802154_ATTR_SEC_KEY] || + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack)) return -EINVAL;
if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0) @@ -1757,7 +1767,8 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info) struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1]; __le64 extended_addr;
- if (nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack)) + if (!info->attrs[NL802154_ATTR_SEC_DEVICE] || + nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack)) return -EINVAL;
if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]) @@ -1913,7 +1924,8 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info struct ieee802154_llsec_device_key key; __le64 extended_addr;
- if (nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack)) + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || + nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack)) return -EINVAL;
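
The nl802154 hunks above all add the same guard: confirm the top-level attribute is present before handing it to the nested parser, so a request that omits it fails with -EINVAL instead of passing a NULL attribute down. A stripped-down model of that guard, using invented types in place of the real netlink API:

#include <stdio.h>

/* Invented stand-ins for netlink attributes and a nested parser. */
struct attr { int type; int len; const void *data; };

#define ATTR_SEC_KEY 1
#define ATTR_MAX     4

static int parse_nested(struct attr *out[], int maxtype, const struct attr *container)
{
    /* The real nla_parse_nested_deprecated() would walk container->data. */
    (void)out;
    (void)maxtype;
    return container->len >= 0 ? 0 : -1;
}

static int add_llsec_key(struct attr *request[ATTR_MAX + 1])
{
    struct attr *nested[ATTR_MAX + 1] = { NULL };

    /* Guard first: a request that omits the attribute is simply invalid. */
    if (!request[ATTR_SEC_KEY] ||
        parse_nested(nested, ATTR_MAX, request[ATTR_SEC_KEY]))
        return -22;     /* -EINVAL */

    return 0;
}

int main(void)
{
    struct attr key = { .type = ATTR_SEC_KEY, .len = 8, .data = "key-bytes" };
    struct attr *good[ATTR_MAX + 1] = { [ATTR_SEC_KEY] = &key };
    struct attr *bad[ATTR_MAX + 1] = { NULL };      /* attribute missing */

    printf("well-formed request: %d\n", add_llsec_key(good));
    printf("missing attribute:   %d\n", add_llsec_key(bad));
    return 0;
}
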
if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]) @@ -2085,6 +2097,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb, struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct ieee802154_llsec_seclevel sl;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) + return -EOPNOTSUPP; + if (!info->attrs[NL802154_ATTR_SEC_LEVEL] || llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], &sl) < 0) diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index d99e1be94019..36ed85bf2ad5 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -141,7 +141,7 @@ static void ah_output_done(struct crypto_async_request *base, int err) }
kfree(AH_SKB_CB(skb)->tmp); - xfrm_output_resume(skb, err); + xfrm_output_resume(skb->sk, skb, err); }
static int ah_output(struct xfrm_state *x, struct sk_buff *skb) diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index a3271ec3e162..4b834bbf95e0 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -279,7 +279,7 @@ static void esp_output_done(struct crypto_async_request *base, int err) x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) esp_output_tail_tcp(x, skb); else - xfrm_output_resume(skb, err); + xfrm_output_resume(skb->sk, skb, err); } }
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 5bda5aeda579..5aa7344dbec7 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -217,10 +217,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) && !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev) - esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK | + NETIF_F_SCTP_CRC); else if (!(features & NETIF_F_HW_ESP_TX_CSUM) && !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM)) - esp_features = features & ~NETIF_F_CSUM_MASK; + esp_features = features & ~(NETIF_F_CSUM_MASK | + NETIF_F_SCTP_CRC);
xo->flags |= XFRM_GSO_SEGMENT;
@@ -312,8 +314,17 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ ip_hdr(skb)->tot_len = htons(skb->len); ip_send_check(ip_hdr(skb));
- if (hw_offload) + if (hw_offload) { + if (!skb_ext_add(skb, SKB_EXT_SEC_PATH)) + return -ENOMEM; + + xo = xfrm_offload(skb); + if (!xo) + return -EINVAL; + + xo->flags |= XFRM_XMIT; return 0; + }
err = esp_output_tail(x, skb, &esp); if (err) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 69ea76578abb..9d2a1a247cec 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2749,6 +2749,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, val = up->gso_size; break;
+ case UDP_GRO: + val = up->gro_enabled; + break; + /* The following two cannot be changed on UDP sockets, the return is * always 0 (which corresponds to the full checksum coverage of UDP). */ case UDPLITE_SEND_CSCOV: diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 440080da805b..080ee7f44c64 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -316,7 +316,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err) }
kfree(AH_SKB_CB(skb)->tmp); - xfrm_output_resume(skb, err); + xfrm_output_resume(skb->sk, skb, err); }
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 2b804fcebcc6..4071cb7c7a15 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -314,7 +314,7 @@ static void esp_output_done(struct crypto_async_request *base, int err) x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) esp_output_tail_tcp(x, skb); else - xfrm_output_resume(skb, err); + xfrm_output_resume(skb->sk, skb, err); } }
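
Returning to the UDP_GRO case added to udp_lib_getsockopt() above: with that hunk, userspace can read the GRO flag back after setting it. A minimal usage sketch follows; UDP_GRO normally comes from <linux/udp.h>, the fallback value 104 below is an assumption for older headers, and the read-back only succeeds on kernels that carry this fix.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>

#ifndef UDP_GRO
#define UDP_GRO 104             /* assumed value from <linux/udp.h> */
#endif

int main(void)
{
    int on = 1, readback = 0;
    socklen_t len = sizeof(readback);
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }

    /* Enable UDP GRO on the receive socket. */
    if (setsockopt(fd, IPPROTO_UDP, UDP_GRO, &on, sizeof(on)) < 0)
        perror("setsockopt(UDP_GRO)");

    /* With the fix above, the value can be queried back. */
    if (getsockopt(fd, IPPROTO_UDP, UDP_GRO, &readback, &len) < 0)
        perror("getsockopt(UDP_GRO)");
    else
        printf("UDP_GRO is %s\n", readback ? "enabled" : "disabled");

    close(fd);
    return 0;
}
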
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 1ca516fb30e1..4af56affaafd 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -254,9 +254,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb, skb->encap_hdr_csum = 1;
if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) - esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK | + NETIF_F_SCTP_CRC); else if (!(features & NETIF_F_HW_ESP_TX_CSUM)) - esp_features = features & ~NETIF_F_CSUM_MASK; + esp_features = features & ~(NETIF_F_CSUM_MASK | + NETIF_F_SCTP_CRC);
xo->flags |= XFRM_GSO_SEGMENT;
@@ -346,8 +348,17 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
ipv6_hdr(skb)->payload_len = htons(len);
- if (hw_offload) + if (hw_offload) { + if (!skb_ext_add(skb, SKB_EXT_SEC_PATH)) + return -ENOMEM; + + xo = xfrm_offload(skb); + if (!xo) + return -EINVAL; + + xo->flags |= XFRM_XMIT; return 0; + }
err = esp6_output_tail(x, skb, &esp); if (err) diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 1f56d9aae589..bf3646b57c68 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -298,7 +298,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) */ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST) && - !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) { + !ipv6_can_nonlocal_bind(sock_net(sk), inet)) { err = -EADDRNOTAVAIL; if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, dev, 0)) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0bbfaa55e3c8..4bba6d21ffa0 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -5203,9 +5203,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, * nexthops have been replaced by first new, the rest should * be added to it. */ - cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | - NLM_F_REPLACE); - cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; + if (cfg->fc_nlinfo.nlh) { + cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | + NLM_F_REPLACE); + cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; + } nhn++; }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9db648a91a4f..b7155b078b19 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4707,7 +4707,10 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t) timeout = sta->rx_stats.last_rx; timeout += IEEE80211_CONNECTION_IDLE_TIME;
- if (time_is_before_jiffies(timeout)) { + /* If timeout is after now, then update timer to fire at + * the later date, but do not actually probe at this time. + */ + if (time_is_after_jiffies(timeout)) { mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout)); return; } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ebb3228ce971..64fae4f645f5 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3578,7 +3578,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags)) goto out;
- if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) { + if (vif->txqs_stopped[txq->ac]) { set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags); goto out; } diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index 585d33144c33..55550ead2ced 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -152,7 +152,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template) crypto_free_sync_skcipher(key->tfm0); err_tfm: for (i = 0; i < ARRAY_SIZE(key->tfm); i++) - if (key->tfm[i]) + if (!IS_ERR_OR_NULL(key->tfm[i])) crypto_free_aead(key->tfm[i]);
kfree_sensitive(key); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 5932b0ebecc3..e337b35a368f 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -11,7 +11,6 @@ #include <linux/netdevice.h> #include <linux/sched/signal.h> #include <linux/atomic.h> -#include <linux/igmp.h> #include <net/sock.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> @@ -20,7 +19,6 @@ #include <net/tcp_states.h> #if IS_ENABLED(CONFIG_MPTCP_IPV6) #include <net/transp_v6.h> -#include <net/addrconf.h> #endif #include <net/mptcp.h> #include <net/xfrm.h> @@ -2863,6 +2861,48 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, return ret; }
+static bool mptcp_unsupported(int level, int optname) +{ + if (level == SOL_IP) { + switch (optname) { + case IP_ADD_MEMBERSHIP: + case IP_ADD_SOURCE_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: + case IP_DROP_SOURCE_MEMBERSHIP: + case IP_BLOCK_SOURCE: + case IP_UNBLOCK_SOURCE: + case MCAST_JOIN_GROUP: + case MCAST_LEAVE_GROUP: + case MCAST_JOIN_SOURCE_GROUP: + case MCAST_LEAVE_SOURCE_GROUP: + case MCAST_BLOCK_SOURCE: + case MCAST_UNBLOCK_SOURCE: + case MCAST_MSFILTER: + return true; + } + return false; + } + if (level == SOL_IPV6) { + switch (optname) { + case IPV6_ADDRFORM: + case IPV6_ADD_MEMBERSHIP: + case IPV6_DROP_MEMBERSHIP: + case IPV6_JOIN_ANYCAST: + case IPV6_LEAVE_ANYCAST: + case MCAST_JOIN_GROUP: + case MCAST_LEAVE_GROUP: + case MCAST_JOIN_SOURCE_GROUP: + case MCAST_LEAVE_SOURCE_GROUP: + case MCAST_BLOCK_SOURCE: + case MCAST_UNBLOCK_SOURCE: + case MCAST_MSFILTER: + return true; + } + return false; + } + return false; +} + static int mptcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { @@ -2871,6 +2911,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
pr_debug("msk=%p", msk);
+ if (mptcp_unsupported(level, optname)) + return -ENOPROTOOPT; + if (level == SOL_SOCKET) return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
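
With mptcp_unsupported() in place above, multicast and anycast socket options on an MPTCP socket fail cleanly with -ENOPROTOOPT, which is what lets the mptcp_release()/mptcp6_release() workarounds be dropped further down. A small userspace probe of that behaviour; IPPROTO_MPTCP may need the fallback definition shown, and on kernels without this fix the option may still be accepted.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262       /* assumed; recent headers define it */
#endif

int main(void)
{
    struct ip_mreq mreq;
    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

    if (fd < 0) {
        perror("socket(IPPROTO_MPTCP)");        /* kernel without MPTCP */
        return 1;
    }

    memset(&mreq, 0, sizeof(mreq));
    mreq.imr_multiaddr.s_addr = htonl(0xE0000001u);     /* 224.0.0.1 */
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);

    if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
        printf("IP_ADD_MEMBERSHIP rejected: %s\n", strerror(errno));
    else
        printf("IP_ADD_MEMBERSHIP accepted (pre-fix kernel behaviour)\n");

    close(fd);
    return 0;
}
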
@@ -3379,34 +3422,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, return mask; }
-static int mptcp_release(struct socket *sock) -{ - struct mptcp_subflow_context *subflow; - struct sock *sk = sock->sk; - struct mptcp_sock *msk; - - if (!sk) - return 0; - - lock_sock(sk); - - msk = mptcp_sk(sk); - - mptcp_for_each_subflow(msk, subflow) { - struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - - ip_mc_drop_socket(ssk); - } - - release_sock(sk); - - return inet_release(sock); -} - static const struct proto_ops mptcp_stream_ops = { .family = PF_INET, .owner = THIS_MODULE, - .release = mptcp_release, + .release = inet_release, .bind = mptcp_bind, .connect = mptcp_stream_connect, .socketpair = sock_no_socketpair, @@ -3453,35 +3472,10 @@ void __init mptcp_proto_init(void) }
#if IS_ENABLED(CONFIG_MPTCP_IPV6) -static int mptcp6_release(struct socket *sock) -{ - struct mptcp_subflow_context *subflow; - struct mptcp_sock *msk; - struct sock *sk = sock->sk; - - if (!sk) - return 0; - - lock_sock(sk); - - msk = mptcp_sk(sk); - - mptcp_for_each_subflow(msk, subflow) { - struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - - ip_mc_drop_socket(ssk); - ipv6_sock_mc_close(ssk); - ipv6_sock_ac_close(ssk); - } - - release_sock(sk); - return inet6_release(sock); -} - static const struct proto_ops mptcp_v6_stream_ops = { .family = PF_INET6, .owner = THIS_MODULE, - .release = mptcp6_release, + .release = inet6_release, .bind = mptcp_bind, .connect = mptcp_stream_connect, .socketpair = sock_no_socketpair, diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index a9cb355324d1..ffff8da707b8 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -105,13 +105,20 @@ static void ncsi_channel_monitor(struct timer_list *t) monitor_state = nc->monitor.state; spin_unlock_irqrestore(&nc->lock, flags);
- if (!enabled || chained) { - ncsi_stop_channel_monitor(nc); - return; - } + if (!enabled) + return; /* expected race disabling timer */ + if (WARN_ON_ONCE(chained)) + goto bad_state; + if (state != NCSI_CHANNEL_INACTIVE && state != NCSI_CHANNEL_ACTIVE) { - ncsi_stop_channel_monitor(nc); +bad_state: + netdev_warn(ndp->ndev.dev, + "Bad NCSI monitor state channel %d 0x%x %s queue\n", + nc->id, state, chained ? "on" : "off"); + spin_lock_irqsave(&nc->lock, flags); + nc->monitor.enabled = false; + spin_unlock_irqrestore(&nc->lock, flags); return; }
@@ -136,10 +143,9 @@ static void ncsi_channel_monitor(struct timer_list *t) ncsi_report_link(ndp, true); ndp->flags |= NCSI_DEV_RESHUFFLE;
- ncsi_stop_channel_monitor(nc); - ncm = &nc->modes[NCSI_MODE_LINK]; spin_lock_irqsave(&nc->lock, flags); + nc->monitor.enabled = false; nc->state = NCSI_CHANNEL_INVISIBLE; ncm->data[2] &= ~0x1; spin_unlock_irqrestore(&nc->lock, flags); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index d257ed3b732a..a3b46f888803 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -108,11 +108,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) llcp_sock->service_name_len, GFP_KERNEL); if (!llcp_sock->service_name) { + nfc_llcp_local_put(llcp_sock->local); ret = -ENOMEM; goto put_dev; } llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); if (llcp_sock->ssap == LLCP_SAP_MAX) { + nfc_llcp_local_put(llcp_sock->local); kfree(llcp_sock->service_name); llcp_sock->service_name = NULL; ret = -EADDRINUSE; @@ -671,6 +673,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, ret = -EISCONN; goto error; } + if (sk->sk_state == LLCP_CONNECTING) { + ret = -EINPROGRESS; + goto error; + }
dev = nfc_get_device(addr->dev_idx); if (dev == NULL) { @@ -702,6 +708,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, llcp_sock->local = nfc_llcp_local_get(local); llcp_sock->ssap = nfc_llcp_get_local_ssap(local); if (llcp_sock->ssap == LLCP_SAP_MAX) { + nfc_llcp_local_put(llcp_sock->local); ret = -ENOMEM; goto put_dev; } @@ -743,9 +750,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
sock_unlink: nfc_llcp_sock_unlink(&local->connecting_sockets, sk); + kfree(llcp_sock->service_name); + llcp_sock->service_name = NULL;
sock_llcp_release: nfc_llcp_put_ssap(local, llcp_sock->ssap); + nfc_llcp_local_put(llcp_sock->local);
put_dev: nfc_put_device(dev); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 5eddfe7bd391..2316efd6ace8 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -2032,10 +2032,10 @@ static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit, static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info, struct sk_buff *reply) { - struct ovs_zone_limit zone_limit; - - zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE; - zone_limit.limit = info->default_limit; + struct ovs_zone_limit zone_limit = { + .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE, + .limit = info->default_limit, + };
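
The openvswitch hunk above switches ovs_zone_limit to a designated initializer, so any member the code does not set explicitly is zeroed before the struct is pushed into the netlink reply, instead of carrying leftover stack bytes. The sketch below contrasts the two styles with an invented struct; padding bytes are a separate concern, and memset() remains the most conservative option.

#include <stdio.h>
#include <string.h>

struct zone_limit_like {
    int zone_id;
    unsigned int limit;
    unsigned int count;         /* never assigned on this code path */
};

static void fill_assignments(struct zone_limit_like *zl)
{
    /* Uninitialized 'count' keeps whatever was on the stack. */
    zl->zone_id = -1;
    zl->limit = 100;
}

static struct zone_limit_like fill_initializer(void)
{
    /* Members not named here ('count') are zero-initialized. */
    struct zone_limit_like zl = {
        .zone_id = -1,
        .limit = 100,
    };
    return zl;
}

int main(void)
{
    struct zone_limit_like a;   /* deliberately not zeroed */
    struct zone_limit_like b = fill_initializer();

    memset(&a, 0xAA, sizeof(a));        /* simulate stale stack data */
    fill_assignments(&a);

    printf("assignment style:  count=0x%x (stale)\n", a.count);
    printf("initializer style: count=0x%x (zeroed)\n", b.count);
    return 0;
}
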
return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit); } diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index dfc820ee553a..1e4fb568fa84 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -271,7 +271,10 @@ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port, flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (flow) { init_waitqueue_head(&flow->resume_tx); - radix_tree_insert(&node->qrtr_tx_flow, key, flow); + if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) { + kfree(flow); + flow = NULL; + } } } mutex_unlock(&node->qrtr_tx_lock); diff --git a/net/rds/message.c b/net/rds/message.c index 071a261fdaab..799034e0f513 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -347,8 +347,9 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE); rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); if (IS_ERR(rm->data.op_sg)) { + void *err = ERR_CAST(rm->data.op_sg); rds_message_put(rm); - return ERR_CAST(rm->data.op_sg); + return err; }
for (i = 0; i < rm->data.op_nents; ++i) { diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 68d6ef9e59fc..ac15a944573f 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -69,7 +69,7 @@ struct rfkill {
struct rfkill_int_event { struct list_head list; - struct rfkill_event ev; + struct rfkill_event_ext ev; };
struct rfkill_data { @@ -253,7 +253,8 @@ static void rfkill_global_led_trigger_unregister(void) } #endif /* CONFIG_RFKILL_LEDS */
-static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, +static void rfkill_fill_event(struct rfkill_event_ext *ev, + struct rfkill *rfkill, enum rfkill_operation op) { unsigned long flags; @@ -1237,7 +1238,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct rfkill *rfkill; - struct rfkill_event ev; + struct rfkill_event_ext ev; int ret;
/* we don't need the 'hard' variable but accept it */ diff --git a/net/sched/act_api.c b/net/sched/act_api.c index b919826939e0..f6d5755d669e 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -158,7 +158,7 @@ static int __tcf_action_put(struct tc_action *p, bool bind) return 0; }
-int __tcf_idr_release(struct tc_action *p, bool bind, bool strict) +static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict) { int ret = 0;
@@ -184,7 +184,18 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
return ret; } -EXPORT_SYMBOL(__tcf_idr_release); + +int tcf_idr_release(struct tc_action *a, bool bind) +{ + const struct tc_action_ops *ops = a->ops; + int ret; + + ret = __tcf_idr_release(a, bind, false); + if (ret == ACT_P_DELETED) + module_put(ops->owner); + return ret; +} +EXPORT_SYMBOL(tcf_idr_release);
static size_t tcf_action_shared_attrs_size(const struct tc_action *act) { @@ -493,6 +504,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, }
p->idrinfo = idrinfo; + __module_get(ops->owner); p->ops = ops; *a = p; return 0; @@ -992,7 +1004,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla, struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - struct tc_action_ops *a_o, bool rtnl_held, + struct tc_action_ops *a_o, int *init_res, + bool rtnl_held, struct netlink_ext_ack *extack) { struct nla_bitfield32 flags = { 0, 0 }; @@ -1028,6 +1041,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, } if (err < 0) goto err_out; + *init_res = err;
if (!name && tb[TCA_ACT_COOKIE]) tcf_set_action_cookie(&a->act_cookie, cookie); @@ -1035,13 +1049,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, if (!name) a->hw_stats = hw_stats;
- /* module count goes up only when brand new policy is created - * if it exists and is only bound to in a_o->init() then - * ACT_P_CREATED is not returned (a zero is). - */ - if (err != ACT_P_CREATED) - module_put(a_o->owner); - return a;
err_out: @@ -1056,7 +1063,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, - struct tc_action *actions[], size_t *attr_size, + struct tc_action *actions[], int init_res[], size_t *attr_size, bool rtnl_held, struct netlink_ext_ack *extack) { struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {}; @@ -1084,7 +1091,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind, - ops[i - 1], rtnl_held, extack); + ops[i - 1], &init_res[i - 1], rtnl_held, + extack); if (IS_ERR(act)) { err = PTR_ERR(act); goto err; @@ -1100,7 +1108,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, tcf_idr_insert_many(actions);
*attr_size = tcf_action_full_attrs_size(sz); - return i - 1; + err = i - 1; + goto err_mod;
err: tcf_action_destroy(actions, bind); @@ -1497,12 +1506,13 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, struct netlink_ext_ack *extack) { size_t attr_size = 0; - int loop, ret; + int loop, ret, i; struct tc_action *actions[TCA_ACT_MAX_PRIO] = {}; + int init_res[TCA_ACT_MAX_PRIO] = {};
for (loop = 0; loop < 10; loop++) { ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, - actions, &attr_size, true, extack); + actions, init_res, &attr_size, true, extack); if (ret != -EAGAIN) break; } @@ -1510,8 +1520,12 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, if (ret < 0) return ret; ret = tcf_add_notify(net, n, actions, portid, attr_size, extack); - if (ovr) - tcf_action_put_many(actions); + + /* only put existing actions */ + for (i = 0; i < TCA_ACT_MAX_PRIO; i++) + if (init_res[i] == ACT_P_CREATED) + actions[i] = NULL; + tcf_action_put_many(actions);
return ret; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e37556cc37ab..b3a2cba130a1 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -646,7 +646,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) struct net_device *dev = block_cb->indr.dev; struct Qdisc *sch = block_cb->indr.sch; struct netlink_ext_ack extack = {}; - struct flow_block_offload bo; + struct flow_block_offload bo = {};
tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, block_cb->indr.binder_type, @@ -3039,6 +3039,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, { #ifdef CONFIG_NET_CLS_ACT { + int init_res[TCA_ACT_MAX_PRIO] = {}; struct tc_action *act; size_t attr_size = 0;
@@ -3050,12 +3051,11 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, return PTR_ERR(a_o); act = tcf_action_init_1(net, tp, tb[exts->police], rate_tlv, "police", ovr, - TCA_ACT_BIND, a_o, rtnl_held, - extack); - if (IS_ERR(act)) { - module_put(a_o->owner); + TCA_ACT_BIND, a_o, init_res, + rtnl_held, extack); + module_put(a_o->owner); + if (IS_ERR(act)) return PTR_ERR(act); - }
act->type = exts->type = TCA_OLD_COMPAT; exts->actions[0] = act; @@ -3066,8 +3066,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, NULL, ovr, TCA_ACT_BIND, - exts->actions, &attr_size, - rtnl_held, extack); + exts->actions, init_res, + &attr_size, rtnl_held, extack); if (err < 0) return err; exts->nr_actions = err; diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 2f1f0a378408..6af6b95bdb67 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -134,6 +134,9 @@ teql_destroy(struct Qdisc *sch) struct teql_sched_data *dat = qdisc_priv(sch); struct teql_master *master = dat->m;
+ if (!master) + return; + prev = master->slaves; if (prev) { do { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index c3e89c776e66..bd08807c9e44 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -664,8 +664,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) if (!(type & IPV6_ADDR_UNICAST)) return 0;
- return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind || - ipv6_chk_addr(net, in6, NULL, 0); + return ipv6_can_nonlocal_bind(net, &sp->inet) || + ipv6_chk_addr(net, in6, NULL, 0); }
/* This function checks if the address is a valid address to be used for @@ -954,8 +954,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) net = sock_net(&opt->inet.sk); rcu_read_lock(); dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); - if (!dev || !(opt->inet.freebind || - net->ipv6.sysctl.ip_nonlocal_bind || + if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) || ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0))) { rcu_read_unlock(); diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index f4fca8f7f63f..97710ce36047 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -1941,12 +1941,13 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead, goto rcv; if (tipc_aead_clone(&tmp, aead) < 0) goto rcv; + WARN_ON(!refcount_inc_not_zero(&tmp->refcnt)); if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) { tipc_aead_free(&tmp->rcu); goto rcv; } tipc_aead_put(aead); - aead = tipc_aead_get(tmp); + aead = tmp; }
if (unlikely(err)) { diff --git a/net/tipc/socket.c b/net/tipc/socket.c index cebcc104dc70..022999e0202d 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1265,7 +1265,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, spin_lock_bh(&inputq->lock); if (skb_peek(arrvq) == skb) { skb_queue_splice_tail_init(&tmpq, inputq); - kfree_skb(__skb_dequeue(arrvq)); + __skb_dequeue(arrvq); } spin_unlock_bh(&inputq->lock); __skb_queue_purge(&tmpq); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 775d0c4d86c3..1f2dff186cb6 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5,7 +5,7 @@ * Copyright 2006-2010 Johannes Berg johannes@sipsolutions.net * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */
#include <linux/if.h> @@ -209,9 +209,13 @@ static int validate_beacon_head(const struct nlattr *attr, unsigned int len = nla_len(attr); const struct element *elem; const struct ieee80211_mgmt *mgmt = (void *)data; - bool s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control); unsigned int fixedlen, hdrlen; + bool s1g_bcn;
+ if (len < offsetofend(typeof(*mgmt), frame_control)) + goto err; + + s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control); if (s1g_bcn) { fixedlen = offsetof(struct ieee80211_ext, u.s1g_beacon.variable); @@ -5397,7 +5401,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP], ¶ms); if (err) - return err; + goto out; }
nl80211_calculate_ap_params(¶ms); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 1b7fec3b53cd..1f1241443a1c 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2352,14 +2352,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, return NULL;
if (ext) { - struct ieee80211_s1g_bcn_compat_ie *compat; - u8 *ie; + const struct ieee80211_s1g_bcn_compat_ie *compat; + const struct element *elem;
- ie = (void *)cfg80211_find_ie(WLAN_EID_S1G_BCN_COMPAT, - variable, ielen); - if (!ie) + elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, + variable, ielen); + if (!elem) + return NULL; + if (elem->datalen < sizeof(*compat)) return NULL; - compat = (void *)(ie + 2); + compat = (void *)elem->data; bssid = ext->u.s1g_beacon.sa; capability = le16_to_cpu(compat->compat_info); beacon_int = le16_to_cpu(compat->beacon_int); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 38df713f2e2e..060e365c8259 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -530,7 +530,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, cfg80211_sme_free(wdev); }
- if (WARN_ON(wdev->conn)) + if (wdev->conn) return -EINPROGRESS;
wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index d8e8a11ca845..a20aec9d7393 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -216,7 +216,7 @@ static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb, case XFRM_MSG_GETSADINFO: case XFRM_MSG_GETSPDINFO: default: - WARN_ONCE(1, "unsupported nlmsg_type %d", nlh_src->nlmsg_type); + pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type); return ERR_PTR(-EOPNOTSUPP); }
@@ -277,7 +277,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src) return xfrm_nla_cpy(dst, src, nla_len(src)); default: BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID); - WARN_ONCE(1, "unsupported nla_type %d", src->nla_type); + pr_warn_once("unsupported nla_type %d\n", src->nla_type); return -EOPNOTSUPP; } } @@ -315,8 +315,10 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src struct sk_buff *new = NULL; int err;
- if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min))) + if (type >= ARRAY_SIZE(xfrm_msg_min)) { + pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type); return -EOPNOTSUPP; + }
if (skb_shinfo(skb)->frag_list == NULL) { new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC); @@ -378,6 +380,10 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src, struct nlmsghdr *nlmsg = dst; struct nlattr *nla;
+ /* xfrm_user_rcv_msg_compat() relies on fact that 32-bit messages + * have the same len or shorter than 64-bit ones. + * 32-bit translation that is bigger than 64-bit original is unexpected. + */ if (WARN_ON_ONCE(copy_len > payload)) copy_len = payload;
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index edf11893dbe8..6d6917b68856 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -134,8 +134,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; }
- xo->flags |= XFRM_XMIT; - if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) { struct sk_buff *segs;
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 697cdcfbb5e1..3f42c2f15ba4 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -305,6 +305,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); } else { + if (!(ip_hdr(skb)->frag_off & htons(IP_DF))) + goto xmit; icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); } @@ -313,6 +315,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) return -EMSGSIZE; }
+xmit: xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev))); skb_dst_set(skb, dst); skb->dev = tdev; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index a7ab19353313..b81ca117dac7 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -503,22 +503,22 @@ static int xfrm_output_one(struct sk_buff *skb, int err) return err; }
-int xfrm_output_resume(struct sk_buff *skb, int err) +int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err) { struct net *net = xs_net(skb_dst(skb)->xfrm);
while (likely((err = xfrm_output_one(skb, err)) == 0)) { nf_reset_ct(skb);
- err = skb_dst(skb)->ops->local_out(net, skb->sk, skb); + err = skb_dst(skb)->ops->local_out(net, sk, skb); if (unlikely(err != 1)) goto out;
if (!skb_dst(skb)->xfrm) - return dst_output(net, skb->sk, skb); + return dst_output(net, sk, skb);
err = nf_hook(skb_dst(skb)->ops->family, - NF_INET_POST_ROUTING, net, skb->sk, skb, + NF_INET_POST_ROUTING, net, sk, skb, NULL, skb_dst(skb)->dev, xfrm_output2); if (unlikely(err != 1)) goto out; @@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(xfrm_output_resume);
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb) { - return xfrm_output_resume(skb, 1); + return xfrm_output_resume(sk, skb, 1); }
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index d01ca1a18418..ffd315cff984 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work); */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; -static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation); static struct kmem_cache *xfrm_state_cache __ro_after_init;
static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task); @@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work) }
spin_lock_bh(&net->xfrm.xfrm_state_lock); - write_seqcount_begin(&xfrm_state_hash_generation); + write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U; odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net); @@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work) rcu_assign_pointer(net->xfrm.state_byspi, nspi); net->xfrm.state_hmask = nhashmask;
- write_seqcount_end(&xfrm_state_hash_generation); + write_seqcount_end(&net->xfrm.xfrm_state_hash_generation); spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head); @@ -1063,7 +1062,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
to_put = NULL;
- sequence = read_seqcount_begin(&xfrm_state_hash_generation); + sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
rcu_read_lock(); h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); @@ -1176,7 +1175,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, if (to_put) xfrm_state_put(to_put);
- if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) { + if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) { *err = -EAGAIN; if (x) { xfrm_state_put(x); @@ -2666,6 +2665,7 @@ int __net_init xfrm_state_init(struct net *net) net->xfrm.state_num = 0; INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); spin_lock_init(&net->xfrm.xfrm_state_lock); + seqcount_init(&net->xfrm.xfrm_state_hash_generation); return 0;
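
The xfrm_state change above retires the file-global xfrm_state_hash_generation seqcount in favour of a per-netns counter initialized in xfrm_state_init(), with xfrm_hash_resize() as the writer and xfrm_state_find() as the retrying reader. For anyone unfamiliar with that read/retry protocol, here is a minimal single-threaded model of a sequence counter; it is a teaching toy, not the kernel's seqcount_t.

#include <stdio.h>

/* Toy sequence counter: an odd value means a writer is mid-update. */
struct seqcount_like {
    unsigned int sequence;
};

static unsigned int read_begin(const struct seqcount_like *s)
{
    return s->sequence & ~1u;   /* even snapshot to compare against */
}

static int read_retry(const struct seqcount_like *s, unsigned int snap)
{
    return s->sequence != snap; /* changed or odd: retry the lookup */
}

static void write_begin(struct seqcount_like *s) { s->sequence++; }
static void write_end(struct seqcount_like *s)   { s->sequence++; }

int main(void)
{
    struct seqcount_like gen = { 0 };
    unsigned int snap;

    /* Reader sees a stable hash table: no retry needed. */
    snap = read_begin(&gen);
    printf("retry without resize: %d\n", read_retry(&gen, snap));

    /* A concurrent resize bumps the generation; the reader retries. */
    snap = read_begin(&gen);
    write_begin(&gen);          /* e.g. xfrm_hash_resize() rehashing */
    write_end(&gen);
    printf("retry after resize:   %d\n", read_retry(&gen, snap));
    return 0;
}

Making the counter per-netns means a resize in one namespace no longer forces lookups in every other namespace to retry.
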
out_byspi: diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c index 0172d87e2b9a..364b2ef9b36f 100644 --- a/security/selinux/ss/avtab.c +++ b/security/selinux/ss/avtab.c @@ -109,7 +109,7 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat struct avtab_node *prev, *cur, *newnode; u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h) + if (!h || !h->nslot) return -EINVAL;
hvalue = avtab_hash(key, h->mask); @@ -154,7 +154,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu struct avtab_node *prev, *cur; u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h) + if (!h || !h->nslot) return NULL; hvalue = avtab_hash(key, h->mask); for (prev = NULL, cur = h->htable[hvalue]; @@ -184,7 +184,7 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key) struct avtab_node *cur; u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h) + if (!h || !h->nslot) return NULL;
hvalue = avtab_hash(key, h->mask); @@ -220,7 +220,7 @@ avtab_search_node(struct avtab *h, struct avtab_key *key) struct avtab_node *cur; u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h) + if (!h || !h->nslot) return NULL;
hvalue = avtab_hash(key, h->mask); @@ -295,6 +295,7 @@ void avtab_destroy(struct avtab *h) } kvfree(h->htable); h->htable = NULL; + h->nel = 0; h->nslot = 0; h->mask = 0; } @@ -303,88 +304,52 @@ void avtab_init(struct avtab *h) { h->htable = NULL; h->nel = 0; + h->nslot = 0; + h->mask = 0; }
-int avtab_alloc(struct avtab *h, u32 nrules) +static int avtab_alloc_common(struct avtab *h, u32 nslot) { - u32 mask = 0; - u32 shift = 0; - u32 work = nrules; - u32 nslot = 0; - - if (nrules == 0) - goto avtab_alloc_out; - - while (work) { - work = work >> 1; - shift++; - } - if (shift > 2) - shift = shift - 2; - nslot = 1 << shift; - if (nslot > MAX_AVTAB_HASH_BUCKETS) - nslot = MAX_AVTAB_HASH_BUCKETS; - mask = nslot - 1; + if (!nslot) + return 0;
h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL); if (!h->htable) return -ENOMEM;
- avtab_alloc_out: - h->nel = 0; h->nslot = nslot; - h->mask = mask; - pr_debug("SELinux: %d avtab hash slots, %d rules.\n", - h->nslot, nrules); + h->mask = nslot - 1; return 0; }
-int avtab_duplicate(struct avtab *new, struct avtab *orig) +int avtab_alloc(struct avtab *h, u32 nrules) { - int i; - struct avtab_node *node, *tmp, *tail; - - memset(new, 0, sizeof(*new)); + int rc; + u32 nslot = 0;
- new->htable = kvcalloc(orig->nslot, sizeof(void *), GFP_KERNEL); - if (!new->htable) - return -ENOMEM; - new->nslot = orig->nslot; - new->mask = orig->mask; - - for (i = 0; i < orig->nslot; i++) { - tail = NULL; - for (node = orig->htable[i]; node; node = node->next) { - tmp = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL); - if (!tmp) - goto error; - tmp->key = node->key; - if (tmp->key.specified & AVTAB_XPERMS) { - tmp->datum.u.xperms = - kmem_cache_zalloc(avtab_xperms_cachep, - GFP_KERNEL); - if (!tmp->datum.u.xperms) { - kmem_cache_free(avtab_node_cachep, tmp); - goto error; - } - tmp->datum.u.xperms = node->datum.u.xperms; - } else - tmp->datum.u.data = node->datum.u.data; - - if (tail) - tail->next = tmp; - else - new->htable[i] = tmp; - - tail = tmp; - new->nel++; + if (nrules != 0) { + u32 shift = 1; + u32 work = nrules >> 3; + while (work) { + work >>= 1; + shift++; } + nslot = 1 << shift; + if (nslot > MAX_AVTAB_HASH_BUCKETS) + nslot = MAX_AVTAB_HASH_BUCKETS; + + rc = avtab_alloc_common(h, nslot); + if (rc) + return rc; }
+ pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules); return 0; -error: - avtab_destroy(new); - return -ENOMEM; +} + +int avtab_alloc_dup(struct avtab *new, const struct avtab *orig) +{ + return avtab_alloc_common(new, orig->nslot); }
void avtab_hash_eval(struct avtab *h, char *tag) diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h index 4c4445ca9118..f2eeb36265d1 100644 --- a/security/selinux/ss/avtab.h +++ b/security/selinux/ss/avtab.h @@ -89,7 +89,7 @@ struct avtab {
void avtab_init(struct avtab *h); int avtab_alloc(struct avtab *, u32); -int avtab_duplicate(struct avtab *new, struct avtab *orig); +int avtab_alloc_dup(struct avtab *new, const struct avtab *orig); struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k); void avtab_destroy(struct avtab *h); void avtab_hash_eval(struct avtab *h, char *tag); diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index 0b32f3ab025e..1ef74c085f2b 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c @@ -605,7 +605,6 @@ static int cond_dup_av_list(struct cond_av_list *new, struct cond_av_list *orig, struct avtab *avtab) { - struct avtab_node *avnode; u32 i;
memset(new, 0, sizeof(*new)); @@ -615,10 +614,11 @@ static int cond_dup_av_list(struct cond_av_list *new, return -ENOMEM;
for (i = 0; i < orig->len; i++) { - avnode = avtab_search_node(avtab, &orig->nodes[i]->key); - if (WARN_ON(!avnode)) - return -EINVAL; - new->nodes[i] = avnode; + new->nodes[i] = avtab_insert_nonunique(avtab, + &orig->nodes[i]->key, + &orig->nodes[i]->datum); + if (!new->nodes[i]) + return -ENOMEM; new->len++; }
@@ -630,7 +630,7 @@ static int duplicate_policydb_cond_list(struct policydb *newp, { int rc, i, j;
- rc = avtab_duplicate(&newp->te_cond_avtab, &origp->te_cond_avtab); + rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab); if (rc) return rc;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 8d9bbd39ab9a..b09138000185 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -1551,6 +1551,7 @@ static int security_context_to_sid_core(struct selinux_state *state, if (!str) goto out; } +retry: rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -1564,6 +1565,15 @@ static int security_context_to_sid_core(struct selinux_state *state, } else if (rc) goto out_unlock; rc = sidtab_context_to_sid(sidtab, &context, sid); + if (rc == -ESTALE) { + rcu_read_unlock(); + if (context.str) { + str = context.str; + context.str = NULL; + } + context_destroy(&context); + goto retry; + } context_destroy(&context); out_unlock: rcu_read_unlock(); @@ -1713,7 +1723,7 @@ static int security_compute_sid(struct selinux_state *state, struct selinux_policy *policy; struct policydb *policydb; struct sidtab *sidtab; - struct class_datum *cladatum = NULL; + struct class_datum *cladatum; struct context *scontext, *tcontext, newcontext; struct sidtab_entry *sentry, *tentry; struct avtab_key avkey; @@ -1735,6 +1745,8 @@ static int security_compute_sid(struct selinux_state *state, goto out; }
+retry: + cladatum = NULL; context_init(&newcontext);
rcu_read_lock(); @@ -1879,6 +1891,11 @@ static int security_compute_sid(struct selinux_state *state, } /* Obtain the sid for the context. */ rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid); + if (rc == -ESTALE) { + rcu_read_unlock(); + context_destroy(&newcontext); + goto retry; + } out_unlock: rcu_read_unlock(); context_destroy(&newcontext); @@ -2190,6 +2207,7 @@ void selinux_policy_commit(struct selinux_state *state, struct selinux_load_state *load_state) { struct selinux_policy *oldpolicy, *newpolicy = load_state->policy; + unsigned long flags; u32 seqno;
oldpolicy = rcu_dereference_protected(state->policy, @@ -2211,7 +2229,13 @@ void selinux_policy_commit(struct selinux_state *state, seqno = newpolicy->latest_granting;
/* Install the new policy. */ - rcu_assign_pointer(state->policy, newpolicy); + if (oldpolicy) { + sidtab_freeze_begin(oldpolicy->sidtab, &flags); + rcu_assign_pointer(state->policy, newpolicy); + sidtab_freeze_end(oldpolicy->sidtab, &flags); + } else { + rcu_assign_pointer(state->policy, newpolicy); + }
/* Load the policycaps from the new policy */ security_load_policycaps(state, newpolicy); @@ -2355,13 +2379,15 @@ int security_port_sid(struct selinux_state *state, struct policydb *policydb; struct sidtab *sidtab; struct ocontext *c; - int rc = 0; + int rc;
if (!selinux_initialized(state)) { *out_sid = SECINITSID_PORT; return 0; }
+retry: + rc = 0; rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -2380,6 +2406,10 @@ int security_port_sid(struct selinux_state *state, if (!c->sid[0]) { rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) goto out; } @@ -2406,13 +2436,15 @@ int security_ib_pkey_sid(struct selinux_state *state, struct policydb *policydb; struct sidtab *sidtab; struct ocontext *c; - int rc = 0; + int rc;
if (!selinux_initialized(state)) { *out_sid = SECINITSID_UNLABELED; return 0; }
+retry: + rc = 0; rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -2433,6 +2465,10 @@ int security_ib_pkey_sid(struct selinux_state *state, rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) goto out; } @@ -2458,13 +2494,15 @@ int security_ib_endport_sid(struct selinux_state *state, struct policydb *policydb; struct sidtab *sidtab; struct ocontext *c; - int rc = 0; + int rc;
if (!selinux_initialized(state)) { *out_sid = SECINITSID_UNLABELED; return 0; }
+retry: + rc = 0; rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -2485,6 +2523,10 @@ int security_ib_endport_sid(struct selinux_state *state, if (!c->sid[0]) { rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) goto out; } @@ -2508,7 +2550,7 @@ int security_netif_sid(struct selinux_state *state, struct selinux_policy *policy; struct policydb *policydb; struct sidtab *sidtab; - int rc = 0; + int rc; struct ocontext *c;
if (!selinux_initialized(state)) { @@ -2516,6 +2558,8 @@ int security_netif_sid(struct selinux_state *state, return 0; }
+retry: + rc = 0; rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -2532,10 +2576,18 @@ int security_netif_sid(struct selinux_state *state, if (!c->sid[0] || !c->sid[1]) { rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) goto out; rc = sidtab_context_to_sid(sidtab, &c->context[1], &c->sid[1]); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) goto out; } @@ -2585,6 +2637,7 @@ int security_node_sid(struct selinux_state *state, return 0; }
+retry: rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -2633,6 +2686,10 @@ int security_node_sid(struct selinux_state *state, rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) goto out; } @@ -2674,18 +2731,24 @@ int security_get_user_sids(struct selinux_state *state, struct sidtab *sidtab; struct context *fromcon, usercon; u32 *mysids = NULL, *mysids2, sid; - u32 mynel = 0, maxnel = SIDS_NEL; + u32 i, j, mynel, maxnel = SIDS_NEL; struct user_datum *user; struct role_datum *role; struct ebitmap_node *rnode, *tnode; - int rc = 0, i, j; + int rc;
*sids = NULL; *nel = 0;
if (!selinux_initialized(state)) - goto out; + return 0; + + mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL); + if (!mysids) + return -ENOMEM;
+retry: + mynel = 0; rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -2705,11 +2768,6 @@ int security_get_user_sids(struct selinux_state *state,
usercon.user = user->value;
- rc = -ENOMEM; - mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC); - if (!mysids) - goto out_unlock; - ebitmap_for_each_positive_bit(&user->roles, rnode, i) { role = policydb->role_val_to_struct[i]; usercon.role = i + 1; @@ -2721,6 +2779,10 @@ int security_get_user_sids(struct selinux_state *state, continue;
rc = sidtab_context_to_sid(sidtab, &usercon, &sid); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) goto out_unlock; if (mynel < maxnel) { @@ -2743,14 +2805,14 @@ int security_get_user_sids(struct selinux_state *state, rcu_read_unlock(); if (rc || !mynel) { kfree(mysids); - goto out; + return rc; }
rc = -ENOMEM; mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL); if (!mysids2) { kfree(mysids); - goto out; + return rc; } for (i = 0, j = 0; i < mynel; i++) { struct av_decision dummy_avd; @@ -2763,12 +2825,10 @@ int security_get_user_sids(struct selinux_state *state, mysids2[j++] = mysids[i]; cond_resched(); } - rc = 0; kfree(mysids); *sids = mysids2; *nel = j; -out: - return rc; + return 0; }
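Two details of the reworked security_get_user_sids() are worth calling out: the result array is now allocated once with GFP_KERNEL before the RCU read-side section (instead of GFP_ATOMIC inside it), and the retry label resets mynel so a restarted walk does not append to results gathered against the replaced policy. The following is a small userspace sketch of that "allocate once outside, reset per attempt" shape; collect_sids() and fill_one() are hypothetical helpers standing in for the real walk and for sidtab_context_to_sid():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SIDS_NEL 25

static int stale_budget = 1;

/* Stand-in for sidtab_context_to_sid(): pretend the policy is reloaded
 * under us exactly once, partway through the walk.
 */
static int fill_one(uint32_t idx, uint32_t *sid)
{
	if (idx == 3 && stale_budget-- > 0)
		return -ESTALE;
	*sid = 1000 + idx;
	return 0;
}

static int collect_sids(uint32_t **sids, uint32_t *nel)
{
	uint32_t *buf, n, i;
	int rc;

	/* Allocate up front, outside any lock, so the retry path never has
	 * to allocate again (mirrors the GFP_KERNEL kcalloc move above).
	 */
	buf = calloc(SIDS_NEL, sizeof(*buf));
	if (!buf)
		return -ENOMEM;

retry:
	n = 0;				/* restart with an empty result set */
	for (i = 0; i < SIDS_NEL; i++) {
		rc = fill_one(i, &buf[n]);
		if (rc == -ESTALE)
			goto retry;	/* walk again against the new policy */
		if (rc) {
			free(buf);
			return rc;
		}
		n++;
	}

	*sids = buf;
	*nel = n;
	return 0;
}

int main(void)
{
	uint32_t *sids, nel;

	if (!collect_sids(&sids, &nel)) {
		printf("collected %u sids after one retry\n", nel);
		free(sids);
	}
	return 0;
}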
/** @@ -2781,6 +2841,9 @@ int security_get_user_sids(struct selinux_state *state, * Obtain a SID to use for a file in a filesystem that * cannot support xattr or use a fixed labeling behavior like * transition SIDs or task SIDs. + * + * WARNING: This function may return -ESTALE, indicating that the caller + * must retry the operation after re-acquiring the policy pointer! */ static inline int __security_genfs_sid(struct selinux_policy *policy, const char *fstype, @@ -2859,11 +2922,13 @@ int security_genfs_sid(struct selinux_state *state, return 0; }
- rcu_read_lock(); - policy = rcu_dereference(state->policy); - retval = __security_genfs_sid(policy, - fstype, path, orig_sclass, sid); - rcu_read_unlock(); + do { + rcu_read_lock(); + policy = rcu_dereference(state->policy); + retval = __security_genfs_sid(policy, fstype, path, + orig_sclass, sid); + rcu_read_unlock(); + } while (retval == -ESTALE); return retval; }
@@ -2886,7 +2951,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb) struct selinux_policy *policy; struct policydb *policydb; struct sidtab *sidtab; - int rc = 0; + int rc; struct ocontext *c; struct superblock_security_struct *sbsec = sb->s_security; const char *fstype = sb->s_type->name; @@ -2897,6 +2962,8 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb) return 0; }
+retry: + rc = 0; rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -2914,6 +2981,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb) if (!c->sid[0]) { rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) goto out; } @@ -2921,6 +2992,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb) } else { rc = __security_genfs_sid(policy, fstype, "/", SECCLASS_DIR, &sbsec->sid); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) { sbsec->behavior = SECURITY_FS_USE_NONE; rc = 0; @@ -3130,12 +3205,13 @@ int security_sid_mls_copy(struct selinux_state *state, u32 len; int rc;
- rc = 0; if (!selinux_initialized(state)) { *new_sid = sid; - goto out; + return 0; }
+retry: + rc = 0; context_init(&newcon);
rcu_read_lock(); @@ -3194,10 +3270,14 @@ int security_sid_mls_copy(struct selinux_state *state, } } rc = sidtab_context_to_sid(sidtab, &newcon, new_sid); + if (rc == -ESTALE) { + rcu_read_unlock(); + context_destroy(&newcon); + goto retry; + } out_unlock: rcu_read_unlock(); context_destroy(&newcon); -out: return rc; }
@@ -3794,6 +3874,8 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state, return 0; }
+retry: + rc = 0; rcu_read_lock(); policy = rcu_dereference(state->policy); policydb = &policy->policydb; @@ -3820,23 +3902,24 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state, goto out; } rc = -EIDRM; - if (!mls_context_isvalid(policydb, &ctx_new)) - goto out_free; + if (!mls_context_isvalid(policydb, &ctx_new)) { + ebitmap_destroy(&ctx_new.range.level[0].cat); + goto out; + }
rc = sidtab_context_to_sid(sidtab, &ctx_new, sid); + ebitmap_destroy(&ctx_new.range.level[0].cat); + if (rc == -ESTALE) { + rcu_read_unlock(); + goto retry; + } if (rc) - goto out_free; + goto out;
security_netlbl_cache_add(secattr, *sid); - - ebitmap_destroy(&ctx_new.range.level[0].cat); } else *sid = SECSID_NULL;
- rcu_read_unlock(); - return 0; -out_free: - ebitmap_destroy(&ctx_new.range.level[0].cat); out: rcu_read_unlock(); return rc; diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index 5ee190bd30f5..656d50b09f76 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -39,6 +39,7 @@ int sidtab_init(struct sidtab *s) for (i = 0; i < SECINITSID_NUM; i++) s->isids[i].set = 0;
+ s->frozen = false; s->count = 0; s->convert = NULL; hash_init(s->context_to_sid); @@ -281,6 +282,15 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context, if (*sid) goto out_unlock;
+ if (unlikely(s->frozen)) { + /* + * This sidtab is now frozen - tell the caller to abort and + * get the new one. + */ + rc = -ESTALE; + goto out_unlock; + } + count = s->count; convert = s->convert;
@@ -474,6 +484,17 @@ void sidtab_cancel_convert(struct sidtab *s) spin_unlock_irqrestore(&s->lock, flags); }
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock) +{ + spin_lock_irqsave(&s->lock, *flags); + s->frozen = true; + s->convert = NULL; +} +void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock) +{ + spin_unlock_irqrestore(&s->lock, *flags); +} + static void sidtab_destroy_entry(struct sidtab_entry *entry) { context_destroy(&entry->context); diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h index 80c744d07ad6..4eff0e49dcb2 100644 --- a/security/selinux/ss/sidtab.h +++ b/security/selinux/ss/sidtab.h @@ -86,6 +86,7 @@ struct sidtab { u32 count; /* access only under spinlock */ struct sidtab_convert_params *convert; + bool frozen; spinlock_t lock;
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 @@ -125,6 +126,9 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
void sidtab_cancel_convert(struct sidtab *s);
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock); +void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock); + int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
void sidtab_destroy(struct sidtab *s); diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 702f91b9c60f..12caa87fe74e 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -1572,6 +1572,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify) return -ENOMEM; kctl->id.device = dev; kctl->id.subdevice = substr; + + /* Add the control before copying the id so that + * the numid field of the id is set in the copy. + */ + err = snd_ctl_add(card, kctl); + if (err < 0) + return err; + switch (idx) { case ACTIVE_IDX: setup->active_id = kctl->id; @@ -1588,9 +1596,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify) default: break; } - err = snd_ctl_add(card, kctl); - if (err < 0) - return err; } } } diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index a980a4eda51c..7aa9062f4f83 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -944,6 +944,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 58946d069ee5..a7544b77d3f7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3927,6 +3927,15 @@ static void alc271_fixup_dmic(struct hda_codec *codec, snd_hda_sequence_write(codec, verbs); }
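The aloop change above reorders loopback_mixer_new() because snd_ctl_add() is what assigns the control's numid; copying kctl->id before the control is added stores a numid of zero, which breaks the notifications later sent with that saved id. Here is a tiny standalone model of the ordering issue; ctl_add() is a stand-in for snd_ctl_add(), not the ALSA API:

/* Userspace model of the ordering fix in loopback_mixer_new(): the numid
 * is only assigned when the control is added, so the id must be copied
 * after ctl_add(), not before.
 */
#include <stdio.h>

struct ctl_id { unsigned int numid; };
struct kctl  { struct ctl_id id; };

static unsigned int next_numid = 1;

static int ctl_add(struct kctl *k)
{
	k->id.numid = next_numid++;	/* snd_ctl_add() assigns the numid */
	return 0;
}

int main(void)
{
	struct kctl k = { 0 };
	struct ctl_id saved_before, saved_after;

	saved_before = k.id;		/* old order: copy, then add */
	ctl_add(&k);
	saved_after = k.id;		/* fixed order: add, then copy */

	printf("copied before add: numid=%u (unusable for notifications)\n",
	       saved_before.numid);
	printf("copied after  add: numid=%u\n", saved_after.numid);
	return 0;
}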
+/* Fix the speaker amp after resume, etc */ +static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + if (action == HDA_FIXUP_ACT_INIT) + alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000); +} + static void alc269_fixup_pcm_44k(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -6301,6 +6310,7 @@ enum { ALC283_FIXUP_HEADSET_MIC, ALC255_FIXUP_MIC_MUTE_LED, ALC282_FIXUP_ASPIRE_V5_PINS, + ALC269VB_FIXUP_ASPIRE_E1_COEF, ALC280_FIXUP_HP_GPIO4, ALC286_FIXUP_HP_GPIO_LED, ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, @@ -6979,6 +6989,10 @@ static const struct hda_fixup alc269_fixups[] = { { }, }, }, + [ALC269VB_FIXUP_ASPIRE_E1_COEF] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269vb_fixup_aspire_e1_coef, + }, [ALC280_FIXUP_HP_GPIO4] = { .type = HDA_FIXUP_FUNC, .v.func = alc280_fixup_hp_gpio4, @@ -7901,6 +7915,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), + SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF), SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC), @@ -8395,6 +8410,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"}, {.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"}, {.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"}, + {.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"}, {.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"}, {.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, {.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"}, diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 660ec46eecf2..ceaf3bbb18e6 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -707,7 +707,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in, best_freq_out = -EINVAL; *sysclk_idx = *dac_idx = *bclk_idx = -1;
- for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) { + /* + * From Datasheet, the PLL performs best when f2 is between + * 90MHz and 100MHz, the desired sysclk output is 11.2896MHz + * or 12.288MHz, then sysclkdiv = 2 is the best choice. + * So search sysclk_divs from 2 to 1 other than from 1 to 2. + */ + for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) { if (sysclk_divs[i] == -1) continue; for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) { diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 9e9b05883557..aa5dd590ddd5 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -488,14 +488,14 @@ static struct snd_soc_dai_driver sst_platform_dai[] = { .channels_min = SST_STEREO, .channels_max = SST_STEREO, .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "Headset Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, { @@ -506,7 +506,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = { .channels_min = SST_STEREO, .channels_max = SST_STEREO, .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, { diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c index 012bac41fee0..ea8e7ad8684d 100644 --- a/sound/soc/sof/intel/hda-dsp.c +++ b/sound/soc/sof/intel/hda-dsp.c @@ -226,10 +226,17 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
- is_enable = (val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) && - (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) && - !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) && - !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask)); +#define MASK_IS_EQUAL(v, m, field) ({ \ + u32 _m = field(m); \ + ((v) & _m) == _m; \ +}) + + is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) && + MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) && + !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) && + !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask)); + +#undef MASK_IS_EQUAL
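The hda-dsp change above tightens the "is this core enabled" test: the old expression was true as soon as any bit selected by core_mask was set in ADSPCS, whereas MASK_IS_EQUAL() only reports enabled when every requested core's bit is set. A minimal standalone illustration of the difference follows; CORE_MASK_BITS() is a simplified stand-in, not the SOF register macros:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in: one bit per core, no register field shifting. */
#define CORE_MASK_BITS(m)	(m)

#define MASK_IS_EQUAL(v, m, field) ({		\
	unsigned int _m = field(m);		\
	((v) & _m) == _m;			\
})

int main(void)
{
	unsigned int core_mask = 0x3;	/* ask about cores 0 and 1 */
	unsigned int val = 0x1;		/* only core 0 is powered */

	bool any = val & CORE_MASK_BITS(core_mask);		   /* old check */
	bool all = MASK_IS_EQUAL(val, core_mask, CORE_MASK_BITS);  /* new check */

	printf("any-bit check: %d, all-bits check: %d\n", any, all);
	return 0;
}

Like the kernel, this relies on GCC/Clang statement expressions, so build it with one of those compilers.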
dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n", is_enable, core_mask); diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c index 6c13cc84b3fb..2173991c13db 100644 --- a/sound/soc/sunxi/sun4i-codec.c +++ b/sound/soc/sunxi/sun4i-codec.c @@ -1364,6 +1364,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM);
card->dev = dev; + card->owner = THIS_MODULE; card->name = "sun4i-codec"; card->dapm_widgets = sun4i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun4i_codec_card_dapm_widgets); @@ -1396,6 +1397,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM);
card->dev = dev; + card->owner = THIS_MODULE; card->name = "A31 Audio Codec"; card->dapm_widgets = sun6i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets); @@ -1449,6 +1451,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM);
card->dev = dev; + card->owner = THIS_MODULE; card->name = "A23 Audio Codec"; card->dapm_widgets = sun6i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets); @@ -1487,6 +1490,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM);
card->dev = dev; + card->owner = THIS_MODULE; card->name = "H3 Audio Codec"; card->dapm_widgets = sun6i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets); @@ -1525,6 +1529,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev) return ERR_PTR(-ENOMEM);
card->dev = dev; + card->owner = THIS_MODULE; card->name = "V3s Audio Codec"; card->dapm_widgets = sun6i_codec_card_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets); diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c index 8caaafe7e312..e7a8d847161f 100644 --- a/tools/lib/bpf/ringbuf.c +++ b/tools/lib/bpf/ringbuf.c @@ -227,7 +227,7 @@ static int ringbuf_process_ring(struct ring* r) if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) { sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ; err = r->sample_cb(r->ctx, sample, len); - if (err) { + if (err < 0) { /* update consumer pos and bail out */ smp_store_release(r->consumer_pos, cons_pos); diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 06746d96742f..ba70937c5362 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -54,6 +54,8 @@ struct xsk_umem { int fd; int refcount; struct list_head ctx_list; + bool rx_ring_setup_done; + bool tx_ring_setup_done; };
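One small libbpf change above, in ringbuf_process_ring(), makes only a negative return from the sample callback abort ring consumption; a positive return (for example, a count of bytes handled) is now treated as success. Below is a hedged consumer sketch using libbpf's ring buffer API; it assumes a BPF_MAP_TYPE_RINGBUF map has already been created by some loaded program and pinned at a path passed on the command line (the pin path and the idea of returning a byte count are illustrative, not requirements of the API):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <stdio.h>

/* With the fix above, a positive return from the callback is no longer
 * mistaken for an error; only a negative return stops ring processing.
 */
static int handle_event(void *ctx, void *data, size_t size)
{
	(void)ctx;
	(void)data;
	return (int)size;	/* positive: "handled this many bytes" */
}

int main(int argc, char **argv)
{
	struct ring_buffer *rb;
	int map_fd, n;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /sys/fs/bpf/<pinned-ringbuf-map>\n", argv[0]);
		return 1;
	}

	map_fd = bpf_obj_get(argv[1]);	/* open the pinned ring buffer map */
	if (map_fd < 0) {
		perror("bpf_obj_get");
		return 1;
	}

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb) {
		fprintf(stderr, "failed to create ring buffer consumer\n");
		return 1;
	}

	n = ring_buffer__poll(rb, 1000 /* ms */);
	printf("consumed %d records\n", n < 0 ? 0 : n);

	ring_buffer__free(rb);
	return 0;
}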
struct xsk_ctx { @@ -668,26 +670,30 @@ static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex, return NULL; }
-static void xsk_put_ctx(struct xsk_ctx *ctx) +static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap) { struct xsk_umem *umem = ctx->umem; struct xdp_mmap_offsets off; int err;
- if (--ctx->refcount == 0) { - err = xsk_get_mmap_offsets(umem->fd, &off); - if (!err) { - munmap(ctx->fill->ring - off.fr.desc, - off.fr.desc + umem->config.fill_size * - sizeof(__u64)); - munmap(ctx->comp->ring - off.cr.desc, - off.cr.desc + umem->config.comp_size * - sizeof(__u64)); - } + if (--ctx->refcount) + return;
- list_del(&ctx->list); - free(ctx); - } + if (!unmap) + goto out_free; + + err = xsk_get_mmap_offsets(umem->fd, &off); + if (err) + goto out_free; + + munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size * + sizeof(__u64)); + munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size * + sizeof(__u64)); + +out_free: + list_del(&ctx->list); + free(ctx); }
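The reworked xsk_put_ctx() above still drops the last reference and frees the context unconditionally, but it only touches the mmapped fill/completion rings when the caller actually owns that mapping (the new unmap argument; the failure path in xsk_socket__create_shared() passes false when it never mapped them). A compact userspace model of that shape, with illustrative types rather than libbpf's:

#include <stdbool.h>
#include <stdlib.h>
#include <sys/mman.h>

struct shared_ctx {
	int refcount;
	void *ring;
	size_t ring_len;
};

static void ctx_put(struct shared_ctx *ctx, bool unmap)
{
	if (--ctx->refcount)
		return;				/* other users remain */

	/* Only callers that actually mapped the rings may unmap them;
	 * an error path that never mapped anything passes unmap=false.
	 */
	if (unmap && ctx->ring && ctx->ring != MAP_FAILED)
		munmap(ctx->ring, ctx->ring_len);

	free(ctx);				/* list removal + free in the real code */
}

int main(void)
{
	struct shared_ctx *ctx = calloc(1, sizeof(*ctx));

	ctx->ring_len = 4096;
	ctx->ring = mmap(NULL, ctx->ring_len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ctx->refcount = 2;			/* two sockets share this ctx */

	ctx_put(ctx, true);			/* first put: refcount -> 1 */
	ctx_put(ctx, true);			/* last put: unmap + free */
	return 0;
}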
static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk, @@ -722,8 +728,6 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk, memcpy(ctx->ifname, ifname, IFNAMSIZ - 1); ctx->ifname[IFNAMSIZ - 1] = '\0';
- umem->fill_save = NULL; - umem->comp_save = NULL; ctx->fill = fill; ctx->comp = comp; list_add(&ctx->list, &umem->ctx_list); @@ -779,6 +783,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, struct xsk_socket *xsk; struct xsk_ctx *ctx; int err, ifindex; + bool unmap = umem->fill_save != fill; + bool rx_setup_done = false, tx_setup_done = false;
if (!umem || !xsk_ptr || !(rx || tx)) return -EFAULT; @@ -806,6 +812,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, } } else { xsk->fd = umem->fd; + rx_setup_done = umem->rx_ring_setup_done; + tx_setup_done = umem->tx_ring_setup_done; }
ctx = xsk_get_ctx(umem, ifindex, queue_id); @@ -824,7 +832,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, } xsk->ctx = ctx;
- if (rx) { + if (rx && !rx_setup_done) { err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, &xsk->config.rx_size, sizeof(xsk->config.rx_size)); @@ -832,8 +840,10 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, err = -errno; goto out_put_ctx; } + if (xsk->fd == umem->fd) + umem->rx_ring_setup_done = true; } - if (tx) { + if (tx && !tx_setup_done) { err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING, &xsk->config.tx_size, sizeof(xsk->config.tx_size)); @@ -841,6 +851,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, err = -errno; goto out_put_ctx; } + if (xsk->fd == umem->fd) + umem->tx_ring_setup_done = true; }
err = xsk_get_mmap_offsets(xsk->fd, &off); @@ -919,6 +931,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, }
*xsk_ptr = xsk; + umem->fill_save = NULL; + umem->comp_save = NULL; return 0;
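The rx_ring_setup_done/tx_ring_setup_done flags, and the late clearing of fill_save/comp_save above, are all about the shared-umem case where a second socket reuses the umem's file descriptor: the XDP_RX_RING/XDP_TX_RING setsockopts must only be issued once per fd, and a failed create must leave the saved fill/completion rings available for a retry. The following is a hedged usage sketch of two AF_XDP sockets sharing one umem via this API; the interface name, queue ids, and sizes are placeholders, and actually running it needs a suitable NIC and privileges:

#include <bpf/xsk.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_FRAMES	4096
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE

int main(void)
{
	struct xsk_ring_prod fill1, fill2, tx1, tx2;
	struct xsk_ring_cons comp1, comp2, rx1, rx2;
	struct xsk_socket *xsk1, *xsk2;
	struct xsk_umem *umem;
	void *bufs;
	int err;

	if (posix_memalign(&bufs, getpagesize(), (size_t)NUM_FRAMES * FRAME_SIZE))
		return 1;

	err = xsk_umem__create(&umem, bufs, (size_t)NUM_FRAMES * FRAME_SIZE,
			       &fill1, &comp1, NULL);
	if (err)
		goto out_free;

	/* First socket: reuses the umem's fd, so this is where the RX/TX
	 * rings get set up (and the new flags record that). */
	err = xsk_socket__create(&xsk1, "eth0", 0, umem, &rx1, &tx1, NULL);
	if (err)
		goto out_umem;

	/* Second socket on another queue: shares the umem but brings its
	 * own fill/completion rings. */
	err = xsk_socket__create_shared(&xsk2, "eth0", 1, umem,
					&rx2, &tx2, &fill2, &comp2, NULL);
	if (err)
		goto out_xsk1;

	puts("both sockets created");
	xsk_socket__delete(xsk2);
out_xsk1:
	xsk_socket__delete(xsk1);
out_umem:
	xsk_umem__delete(umem);
out_free:
	free(bufs);
	return err ? 1 : 0;
}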
out_mmap_tx: @@ -930,7 +944,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, munmap(rx_map, off.rx.desc + xsk->config.rx_size * sizeof(struct xdp_desc)); out_put_ctx: - xsk_put_ctx(ctx); + xsk_put_ctx(ctx, unmap); out_socket: if (--umem->refcount) close(xsk->fd); @@ -944,6 +958,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, const struct xsk_socket_config *usr_config) { + if (!umem) + return -EFAULT; + return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem, rx, tx, umem->fill_save, umem->comp_save, usr_config); @@ -993,7 +1010,7 @@ void xsk_socket__delete(struct xsk_socket *xsk) } }
- xsk_put_ctx(ctx); + xsk_put_ctx(ctx, true);
umem->refcount--; /* Do not close an fd that also has an associated umem connected diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 43937f4b399a..c0be51b95713 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -906,7 +906,7 @@ int cmd_inject(int argc, const char **argv) }
data.path = inject.input_name; - inject.session = perf_session__new(&data, true, &inject.tool); + inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool); if (IS_ERR(inject.session)) return PTR_ERR(inject.session);
diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c index 423ec69bda6c..5ecd4f401f32 100644 --- a/tools/perf/util/block-info.c +++ b/tools/perf/util/block-info.c @@ -201,7 +201,7 @@ static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt, double ratio = 0.0;
if (block_fmt->total_cycles) - ratio = (double)bi->cycles / (double)block_fmt->total_cycles; + ratio = (double)bi->cycles_aggr / (double)block_fmt->total_cycles;
return color_pct(hpp, block_fmt->width, 100.0 * ratio); } @@ -216,9 +216,9 @@ static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt, double l, r;
if (block_fmt->total_cycles) { - l = ((double)bi_l->cycles / + l = ((double)bi_l->cycles_aggr / (double)block_fmt->total_cycles) * 100000.0; - r = ((double)bi_r->cycles / + r = ((double)bi_r->cycles_aggr / (double)block_fmt->total_cycles) * 100000.0; return (int64_t)l - (int64_t)r; }
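The block-info fix makes both the displayed percentage and its sort key read cycles_aggr (cycles summed over every occurrence of the block) against total_cycles, so the total-cycles percentage column can no longer disagree with its own ordering. A trivial standalone illustration of computing that percentage from the aggregated counter; the struct mimics, but is not, perf's block_info:

#include <stdint.h>
#include <stdio.h>

struct blk {
	uint64_t cycles;	/* one sampled occurrence */
	uint64_t cycles_aggr;	/* summed over all occurrences */
};

/* One helper used by both the display path and the sort path, so the
 * printed value and the ordering are always derived from the same field.
 */
static double total_cycles_pct(const struct blk *b, uint64_t total_cycles)
{
	return total_cycles ?
	       100.0 * (double)b->cycles_aggr / (double)total_cycles : 0.0;
}

int main(void)
{
	struct blk hot = { .cycles = 120, .cycles_aggr = 3600 };
	uint64_t total = 10000;

	printf("total cycles%%: %.2f\n", total_cycles_pct(&hot, total));
	return 0;
}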