I'm announcing the release of the 5.15.14 kernel.
All users of the 5.15 kernel series must upgrade.
The updated 5.15.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-5.15.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
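For anyone who wants the tree locally, a minimal sequence along these lines should work (the remote name "stable" below is only an example, not something the tree requires):

	git remote add stable git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
	git fetch stable
	git checkout -b linux-5.15.y stable/linux-5.15.y
	git log v5.15.13..v5.15.14    # the commits summarized below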
thanks,
greg k-h
------------
 Makefile                                               |    2
 arch/arm/boot/dts/bcm2711.dtsi                         |    2
 arch/arm/boot/dts/bcm283x.dtsi                         |    2
 arch/x86/kvm/debugfs.c                                 |    3
 drivers/auxdisplay/charlcd.c                           |    3
 drivers/edac/i10nm_base.c                              |    9 +
 drivers/gpio/gpio-aspeed-sgpio.c                       |    2
 drivers/gpu/drm/amd/amdgpu/amdgpu.h                    |    1
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c            |    2
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c                |   48 ++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c              |    8 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c                |    6 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c      |    1
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c  |   25 ++++
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h  |   31 +++++
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c              |   15 +-
 drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c         |    3
 drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c     |    2
 drivers/i2c/busses/i2c-mpc.c                           |   15 +-
 drivers/i2c/i2c-core-base.c                            |   95 ------------------
 drivers/infiniband/core/uverbs_marshall.c              |    2
 drivers/infiniband/core/uverbs_uapi.c                  |    3
 drivers/infiniband/hw/mlx5/mlx5_ib.h                   |    6 -
 drivers/infiniband/hw/mlx5/mr.c                        |   26 ++--
 drivers/input/touchscreen/zinitix.c                    |   18 +--
 drivers/isdn/mISDN/core.c                              |    6 -
 drivers/isdn/mISDN/core.h                              |    4
 drivers/isdn/mISDN/layer1.c                            |    4
 drivers/md/raid1.c                                     |    3
 drivers/net/ethernet/amazon/ena/ena_netdev.c           |   49 +++++----
 drivers/net/ethernet/aquantia/atlantic/aq_ring.c       |    8 +
 drivers/net/ethernet/intel/i40e/i40e_main.c            |   60 +++++++++--
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c     |   40 ++++++-
 drivers/net/ethernet/intel/iavf/iavf_main.c            |    5
 drivers/net/ethernet/sfc/falcon/rx.c                   |    5
 drivers/net/ethernet/sfc/rx_common.c                   |    5
 drivers/net/ieee802154/atusb.c                         |   10 +
 drivers/net/usb/r8152.c                                |    9 +
 drivers/net/usb/rndis_host.c                           |    5
 drivers/power/reset/ltc2952-poweroff.c                 |    4
 drivers/power/supply/bq25890_charger.c                 |    4
 drivers/power/supply/power_supply_core.c               |    4
 drivers/reset/reset-rzg2l-usbphy-ctrl.c                |    7 +
 drivers/scsi/libiscsi.c                                |    6 -
 drivers/usb/mtu3/mtu3_gadget.c                         |    4
 drivers/video/fbdev/core/fbmem.c                       |   47 ++++++++
 fs/xfs/xfs_ioctl.c                                     |    3
 include/linux/fb.h                                     |    1
 include/linux/fscache.h                                |    2
 include/net/sctp/sctp.h                                |    3
 kernel/cgroup/cgroup-internal.h                        |   19 +++
 kernel/cgroup/cgroup-v1.c                              |   33 +++---
 kernel/cgroup/cgroup.c                                 |   88 +++++++++++----
 kernel/trace/trace.c                                   |    6 -
 net/batman-adv/multicast.c                             |   15 +-
 net/batman-adv/multicast.h                             |   10 +
 net/batman-adv/soft-interface.c                        |    7 -
 net/core/lwtunnel.c                                    |    4
 net/ipv4/fib_semantics.c                               |   49 ++++++++-
 net/ipv4/udp.c                                         |    2
 net/ipv6/ip6_vti.c                                     |    2
 net/ipv6/raw.c                                         |    3
 net/ipv6/route.c                                       |   32 +++++-
 net/mac80211/ieee80211_i.h                             |   24 ++++
 net/mac80211/mesh.h                                    |   22 ----
 net/mac80211/mesh_pathtbl.c                            |   89 +++++-----------
 net/mac80211/mlme.c                                    |    2
 net/netrom/af_netrom.c                                 |    2
 net/phonet/pep.c                                       |    1
 net/sched/sch_qfq.c                                    |    6 -
 net/sctp/diag.c                                        |   46 +++-----
 net/sctp/socket.c                                      |   22 ++--
 net/tipc/socket.c                                      |    2
 samples/ftrace/ftrace-direct-modify.c                  |    3
 samples/ftrace/ftrace-direct-too.c                     |    3
 samples/ftrace/ftrace-direct.c                         |    2
 tools/testing/selftests/net/udpgro_fwd.sh              |    3
 tools/testing/selftests/vm/userfaultfd.c               |   16 +--
 tools/testing/selftests/x86/test_vsyscall.c            |    2
 79 files changed, 736 insertions(+), 407 deletions(-)
Aaron Ma (1): Revert "net: usb: r8152: Add MAC passthrough support for more Lenovo Docks"
Alex Deucher (3):
      fbdev: fbmem: add a helper to determine if an aperture is used by a fw fb
      drm/amdgpu: disable runpm if we are the primary adapter
      drm/amdgpu: always reset the asic in suspend (v2)

Arthur Kiyanovski (3):
      net: ena: Fix undefined state when tx request id is out of bounds
      net: ena: Fix wrong rx request id by resetting device
      net: ena: Fix error handling when calculating max IO queues number
Charlene Liu (1): drm/amd/display: fix B0 TMDS deepcolor no dislay issue
Chris Packham (1): i2c: mpc: Avoid out of bounds memory access
Christian König (1): drm/amdgpu: fix dropped backing store handling in amdgpu_dma_buf_move_notify
Christoph Hellwig (1): netrom: fix copying in user data in nr_setsockopt
Chunfeng Yun (1): usb: mtu3: fix interval value for intr and isoc
Darrick J. Wong (1): xfs: map unwritten blocks in XFS_IOC_{ALLOC,FREE}SP just like fallocate
David Ahern (7):
      ipv4: Check attribute length for RTA_GATEWAY in multipath route
      ipv4: Check attribute length for RTA_FLOW in multipath route
      ipv6: Check attribute length for RTA_GATEWAY in multipath route
      ipv6: Check attribute length for RTA_GATEWAY when deleting multipath route
      lwtunnel: Validate RTA_ENCAP_TYPE attribute length
      ipv6: Continue processing multipath route even if gateway attribute is invalid
      ipv6: Do cleanup if attribute validation fails in multipath route
Di Zhu (1): i40e: fix use-after-free in i40e_sync_filters_subtask()
Dominique Martinet (1): fscache_cookie_enabled: check cookie is valid before accessing it
Eric Dumazet (1): sch_qfq: prevent shift-out-of-bounds in qfq_init_qdisc
Evan Quan (2):
      drm/amdgpu: put SMU into proper state on runpm suspending for BOCO capable platform
      drm/amd/pm: keep the BACO feature enabled for suspend
Greg Kroah-Hartman (1): Linux 5.15.14
Haimin Zhang (1): net ticp:fix a kernel-infoleak in __tipc_sendmsg()
Hangyu Hua (1): phonet: refcount leak in pep_sock_accep
Heiner Kallweit (1): reset: renesas: Fix Runtime PM usage
Jedrzej Jagielski (1): i40e: Fix incorrect netdev's real number of RX/TX queues
Jianguo Wu (1): selftests: net: udpgro_fwd.sh: explicitly checking the available ping feature
Jiasheng Jiang (1): RDMA/uverbs: Check for null return of kmalloc_array
Jiri Olsa (1): ftrace/samples: Add missing prototypes direct functions
Karen Sornek (1): iavf: Fix limit of total number of queues to active queues of VF
Lai, Derek (1): drm/amd/display: Added power down for DCN10
Len Brown (1): Revert "drm/amdgpu: stop scheduler when calling hw_fini (v2)"
Leon Romanovsky (1): RDMA/core: Don't infoleak GRH fields
Lijo Lazar (1): drm/amd/pm: Fix xgmi link control on aldebaran
Linus Lüssing (1): batman-adv: mcast: don't send link-local multicast to mcast routers
Linus Walleij (1): power: supply: core: Break capacity loop
Lixiaokeng (1): scsi: libiscsi: Fix UAF in iscsi_conn_get_param()/iscsi_conn_teardown()
Luiz Sampaio (1): auxdisplay: charlcd: checking for pointer reference before dereferencing
Maor Gottlieb (1): Revert "RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow"
Martin Habets (1): sfc: The RX page_ring is optional
Mateusz Palczewski (2):
      i40e: Fix to not show opcode msg on unsuccessful VF MAC change
      i40e: Fix for displaying message regarding NVM version
Mike Kravetz (1): userfaultfd/selftests: fix hugetlb area allocations
Nathan Chancellor (1): power: reset: ltc2952: Fix use of floating point literals
Naveen N. Rao (2):
      tracing: Fix check for trace_percpu_buffer validity in get_trace_buf()
      tracing: Tag trace_percpu_buffer as a percpu pointer
Nikita Travkin (1): Input: zinitix - make sure the IRQ is allocated before it gets enabled
Nikunj A Dadhania (1): KVM: x86: Check for rmaps allocation
Pavel Skripkin (2):
      ieee802154: atusb: fix uninit value in atusb_set_extended_addr
      mac80211: mesh: embedd mesh_paths and mpp_paths into ieee80211_if_mesh
Phil Elwell (1): ARM: dts: gpio-ranges property is now required
Prike Liang (1): drm/amd/pm: skip setting gfx cgpg in the s0ix suspend-resume
Qiuxu Zhuo (1): EDAC/i10nm: Release mdev/mbase when failing to detect HBM
Shuah Khan (1): selftests: x86: fix [-Wstringop-overread] warn in test_process_vm_readv()
Song Liu (1): md/raid1: fix missing bitmap update w/o WriteMostly devices
Steven Lee (1): gpio: gpio-aspeed-sgpio: Fix wrong hwirq base in irq handler
Tamir Duberstein (1): ipv6: raw: check passed optlen before reading
Tejun Heo (3):
      cgroup: Use open-time credentials for process migraton perm checks
      cgroup: Allocate cgroup_file_ctx for kernfs_open_file->priv
      cgroup: Use open-time cgroup namespace for process migration perm checks
Thomas Toye (1): rndis_host: support Hytera digital radios
Tom Rix (1): mac80211: initialize variable have_higher_than_11mbit
William Zhao (1): ip6_vti: initialize __ip6_tnl_parm struct in vti6_siocdevprivate
Wolfram Sang (1): Revert "i2c: core: support bus regulator controlling in adapter"
Xin Long (1): sctp: hold endpoint before calling cb in sctp_transport_lookup_process
Yauhen Kharuzhy (1): power: bq25890: Enable continuous conversion for ADC at charging
Zekun Shen (1): atlantic: Fix buff_ring OOB in aq_ring_rx_clean
wolfgang huang (1): mISDN: change function names to avoid conflicts
yangxingwu (1): net: udp: fix alignment problem in udp4_seq_show()
diff --git a/Makefile b/Makefile index 0964b940b889..a469670e7675 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 15 -SUBLEVEL = 13 +SUBLEVEL = 14 EXTRAVERSION = NAME = Trick or Treat
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index 9e01dbca4a01..dff18fc9a906 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ b/arch/arm/boot/dts/bcm2711.dtsi @@ -582,6 +582,8 @@ &gpio { <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&gpio 0 0 58>; + gpclk0_gpio49: gpclk0_gpio49 { pin-gpclk { pins = "gpio49"; diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index a3e06b680947..c113661a6668 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -126,6 +126,8 @@ gpio: gpio@7e200000 { interrupt-controller; #interrupt-cells = <2>;
+ gpio-ranges = <&gpio 0 0 54>; + /* Defines common pin muxing groups * * While each pin can have its mux selected diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c index 54a83a744538..f33c804a922a 100644 --- a/arch/x86/kvm/debugfs.c +++ b/arch/x86/kvm/debugfs.c @@ -95,6 +95,9 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v) unsigned int *log[KVM_NR_PAGE_SIZES], *cur; int i, j, k, l, ret;
+ if (!kvm_memslots_have_rmaps(kvm)) + return 0; + ret = -ENOMEM; memset(log, 0, sizeof(log)); for (i = 0; i < KVM_NR_PAGE_SIZES; i++) { diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 304accde365c..6c010d4efa4a 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -578,6 +578,9 @@ static int charlcd_init(struct charlcd *lcd) * Since charlcd_init_display() needs to write data, we have to * enable mark the LCD initialized just before. */ + if (WARN_ON(!lcd->ops->init_display)) + return -EINVAL; + ret = lcd->ops->init_display(lcd); if (ret) return ret; diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 83345bfac246..6cf50ee0b77c 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -358,6 +358,9 @@ static int i10nm_get_hbm_munits(void)
mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE); if (!mbase) { + pci_dev_put(d->imc[lmc].mdev); + d->imc[lmc].mdev = NULL; + i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n", base + off); return -ENOMEM; @@ -368,6 +371,12 @@ static int i10nm_get_hbm_munits(void)
mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0); if (!I10NM_IS_HBM_IMC(mcmtr)) { + iounmap(d->imc[lmc].mbase); + d->imc[lmc].mbase = NULL; + d->imc[lmc].hbm_mc = false; + pci_dev_put(d->imc[lmc].mdev); + d->imc[lmc].mdev = NULL; + i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n"); return -ENODEV; } diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c index 3d6ef37a7702..b3a9b8488f11 100644 --- a/drivers/gpio/gpio-aspeed-sgpio.c +++ b/drivers/gpio/gpio-aspeed-sgpio.c @@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc) reg = ioread32(bank_reg(data, bank, reg_irq_status));
for_each_set_bit(p, &reg, 32) - generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2); + generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2); }
chained_irq_exit(ic, desc); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 289c7dc05363..f428f94b43c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1069,6 +1069,7 @@ struct amdgpu_device { bool runpm; bool in_runpm; bool has_pr3; + bool is_fw_fb;
bool pm_sysfs_en; bool ucode_sysfs_en; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ae6ab93c868b..7444484a12bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -384,7 +384,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) struct amdgpu_vm_bo_base *bo_base; int r;
- if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM) + if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM) return;
r = ttm_bo_validate(&bo->tbo, &placement, &ctx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f18240f87387..41677f99c67b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -38,6 +38,7 @@ #include <drm/drm_probe_helper.h> #include <linux/mmu_notifier.h> #include <linux/suspend.h> +#include <linux/fb.h>
#include "amdgpu.h" #include "amdgpu_irq.h" @@ -1246,6 +1247,26 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static const struct drm_driver amdgpu_kms_driver;
+static bool amdgpu_is_fw_framebuffer(resource_size_t base, + resource_size_t size) +{ + bool found = false; +#if IS_REACHABLE(CONFIG_FB) + struct apertures_struct *a; + + a = alloc_apertures(1); + if (!a) + return false; + + a->ranges[0].base = base; + a->ranges[0].size = size; + + found = is_firmware_framebuffer(a); + kfree(a); +#endif + return found; +} + static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1254,6 +1275,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, unsigned long flags = ent->driver_data; int ret, retry = 0; bool supports_atomic = false; + bool is_fw_fb; + resource_size_t base, size;
if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) @@ -1310,6 +1333,10 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, } #endif
+ base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + is_fw_fb = amdgpu_is_fw_framebuffer(base, size); + /* Get rid of things like offb */ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver); if (ret) @@ -1322,6 +1349,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, adev->dev = &pdev->dev; adev->pdev = pdev; ddev = adev_to_drm(adev); + adev->is_fw_fb = is_fw_fb;
if (!supports_atomic) ddev->driver_features &= ~DRIVER_ATOMIC; @@ -1498,7 +1526,10 @@ static int amdgpu_pmops_suspend(struct device *dev) adev->in_s3 = true; r = amdgpu_device_suspend(drm_dev, true); adev->in_s3 = false; - + if (r) + return r; + if (!adev->in_s0ix) + r = amdgpu_asic_reset(adev); return r; }
@@ -1575,12 +1606,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) if (amdgpu_device_supports_px(drm_dev)) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* + * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some + * proper cleanups and put itself into a state ready for PNP. That + * can address some random resuming failure observed on BOCO capable + * platforms. + * TODO: this may be also needed for PX capable platform. + */ + if (amdgpu_device_supports_boco(drm_dev)) + adev->mp1_state = PP_MP1_STATE_UNLOAD; + ret = amdgpu_device_suspend(drm_dev, false); if (ret) { adev->in_runpm = false; + if (amdgpu_device_supports_boco(drm_dev)) + adev->mp1_state = PP_MP1_STATE_NONE; return ret; }
+ if (amdgpu_device_supports_boco(drm_dev)) + adev->mp1_state = PP_MP1_STATE_NONE; + if (amdgpu_device_supports_px(drm_dev)) { /* Only need to handle PCI state in the driver for ATPX * PCI core handles it for _PR3. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 8d682befe0d6..14499f0de32d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -552,9 +552,6 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue;
- if (!ring->no_scheduler) - drm_sched_stop(&ring->sched, NULL); - /* You can't wait for HW to signal if it's gone */ if (!drm_dev_is_unplugged(&adev->ddev)) r = amdgpu_fence_wait_empty(ring); @@ -614,11 +611,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue;
- if (!ring->no_scheduler) { - drm_sched_resubmit_jobs(&ring->sched); - drm_sched_start(&ring->sched, true); - } - /* enable the interrupt */ if (ring->fence_drv.irq_src) amdgpu_irq_get(adev, ring->fence_drv.irq_src, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 7e45640fbee0..09a2fe839059 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -206,6 +206,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) adev->runpm = true; break; } + /* XXX: disable runtime pm if we are the primary adapter + * to avoid displays being re-enabled after DPMS. + * This needs to be sorted out and fixed properly. + */ + if (adev->is_fw_fb) + adev->runpm = false; if (adev->runpm) dev_info(adev->dev, "Using BACO for runtime pm\n"); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index 34001a30d449..10e613ec7d24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .get_clock = dcn10_get_clock, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, .set_backlight_level = dce110_set_backlight_level, .set_abm_immediate_disable = dce110_set_abm_immediate_disable, .set_pipe = dce110_set_pipe, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 6d8f26dada72..0fe570717ba0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -352,6 +352,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(3, D), clk_src_regs(4, E) }; +/*pll_id being rempped in dmub, in driver it is logical instance*/ +static const struct dce110_clk_src_regs clk_src_regs_b0[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, F), + clk_src_regs(3, G), + clk_src_regs(4, E) +};
static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) @@ -2019,14 +2027,27 @@ static bool dcn31_resource_construct( dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + /*move phypllx_pixclk_resync to dmub next*/ + if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs_b0[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs_b0[3], false); + } else { + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); + } + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h index 93571c976996..cc4bed675588 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h @@ -39,4 +39,35 @@ struct resource_pool *dcn31_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc);
+/*temp: B0 specific before switch to dcn313 headers*/ +#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL +#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e +#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1 +#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f +#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1 + +//PHYPLLF_PIXCLK_RESYNC_CNTL +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L + +//PHYPLLG_PIXCLK_RESYNC_CNTL +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L +#endif #endif /* _DCN31_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 04863a797115..6dc83cfad9d8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1386,8 +1386,14 @@ static int smu_disable_dpms(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; int ret = 0; + /* + * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair + * the workaround which always reset the asic in suspend. + * It's likely that workaround will be dropped in the future. + * Then the change here should be dropped together. + */ bool use_baco = !smu->is_apu && - ((amdgpu_in_reset(adev) && + (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) && (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
@@ -1536,9 +1542,7 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
- /* skip CGPG when in S0ix */ - if (smu->is_apu && !adev->in_s0ix) - smu_set_gfx_cgpg(&adev->smu, false); + smu_set_gfx_cgpg(&adev->smu, false);
return 0; } @@ -1569,8 +1573,7 @@ static int smu_resume(void *handle) return ret; }
- if (smu->is_apu) - smu_set_gfx_cgpg(&adev->smu, true); + smu_set_gfx_cgpg(&adev->smu, true);
smu->disable_uclk_switch = 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index 43028f2cd28b..9c91e79c955f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) { - if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) + /* Until now the SMU12 only implemented for Renoir series so here neen't do APU check. */ + if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix) return 0;
return smu_cmn_send_smc_msg_with_param(smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 5019903db492..c9cfeb094750 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1619,7 +1619,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en) { return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl, - en ? 1 : 0, + en ? 0 : 1, NULL); }
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 53b8da6dbb23..db26cc36e13f 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -492,7 +492,7 @@ static void mpc_i2c_finish(struct mpc_i2c *i2c, int rc)
static void mpc_i2c_do_action(struct mpc_i2c *i2c) { - struct i2c_msg *msg = &i2c->msgs[i2c->curr_msg]; + struct i2c_msg *msg = NULL; int dir = 0; int recv_len = 0; u8 byte; @@ -501,10 +501,13 @@ static void mpc_i2c_do_action(struct mpc_i2c *i2c)
i2c->cntl_bits &= ~(CCR_RSTA | CCR_MTX | CCR_TXAK);
- if (msg->flags & I2C_M_RD) - dir = 1; - if (msg->flags & I2C_M_RECV_LEN) - recv_len = 1; + if (i2c->action != MPC_I2C_ACTION_STOP) { + msg = &i2c->msgs[i2c->curr_msg]; + if (msg->flags & I2C_M_RD) + dir = 1; + if (msg->flags & I2C_M_RECV_LEN) + recv_len = 1; + }
switch (i2c->action) { case MPC_I2C_ACTION_RESTART: @@ -581,7 +584,7 @@ static void mpc_i2c_do_action(struct mpc_i2c *i2c) break; }
- if (msg->len == i2c->byte_posn) { + if (msg && msg->len == i2c->byte_posn) { i2c->curr_msg++; i2c->byte_posn = 0;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 54964fbe3f03..cfbef70e8ba7 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -466,14 +466,12 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) static int i2c_device_probe(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); - struct i2c_adapter *adap; struct i2c_driver *driver; int status;
if (!client) return 0;
- adap = client->adapter; client->irq = client->init_irq;
if (!client->irq) { @@ -539,14 +537,6 @@ static int i2c_device_probe(struct device *dev)
dev_dbg(dev, "probe\n");
- if (adap->bus_regulator) { - status = regulator_enable(adap->bus_regulator); - if (status < 0) { - dev_err(&adap->dev, "Failed to enable bus regulator\n"); - goto err_clear_wakeup_irq; - } - } - status = of_clk_set_defaults(dev->of_node, false); if (status < 0) goto err_clear_wakeup_irq; @@ -604,10 +594,8 @@ static int i2c_device_probe(struct device *dev) static void i2c_device_remove(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); - struct i2c_adapter *adap; struct i2c_driver *driver;
- adap = client->adapter; driver = to_i2c_driver(dev->driver); if (driver->remove) { int status; @@ -622,8 +610,6 @@ static void i2c_device_remove(struct device *dev) devres_release_group(&client->dev, client->devres_group_id);
dev_pm_domain_detach(&client->dev, true); - if (!pm_runtime_status_suspended(&client->dev) && adap->bus_regulator) - regulator_disable(adap->bus_regulator);
dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); @@ -633,86 +619,6 @@ static void i2c_device_remove(struct device *dev) pm_runtime_put(&client->adapter->dev); }
-#ifdef CONFIG_PM_SLEEP -static int i2c_resume_early(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - int err; - - if (!client) - return 0; - - if (pm_runtime_status_suspended(&client->dev) && - client->adapter->bus_regulator) { - err = regulator_enable(client->adapter->bus_regulator); - if (err) - return err; - } - - return pm_generic_resume_early(&client->dev); -} - -static int i2c_suspend_late(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - int err; - - if (!client) - return 0; - - err = pm_generic_suspend_late(&client->dev); - if (err) - return err; - - if (!pm_runtime_status_suspended(&client->dev) && - client->adapter->bus_regulator) - return regulator_disable(client->adapter->bus_regulator); - - return 0; -} -#endif - -#ifdef CONFIG_PM -static int i2c_runtime_resume(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - int err; - - if (!client) - return 0; - - if (client->adapter->bus_regulator) { - err = regulator_enable(client->adapter->bus_regulator); - if (err) - return err; - } - - return pm_generic_runtime_resume(&client->dev); -} - -static int i2c_runtime_suspend(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - int err; - - if (!client) - return 0; - - err = pm_generic_runtime_suspend(&client->dev); - if (err) - return err; - - if (client->adapter->bus_regulator) - return regulator_disable(client->adapter->bus_regulator); - return 0; -} -#endif - -static const struct dev_pm_ops i2c_device_pm = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(i2c_suspend_late, i2c_resume_early) - SET_RUNTIME_PM_OPS(i2c_runtime_suspend, i2c_runtime_resume, NULL) -}; - static void i2c_device_shutdown(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); @@ -772,7 +678,6 @@ struct bus_type i2c_bus_type = { .probe = i2c_device_probe, .remove = i2c_device_remove, .shutdown = i2c_device_shutdown, - .pm = &i2c_device_pm, }; EXPORT_SYMBOL_GPL(i2c_bus_type);
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index b8d715c68ca4..11a080646916 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c @@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device, struct rdma_ah_attr *src = ah_attr; struct rdma_ah_attr conv_ah;
- memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved)); + memset(&dst->grh, 0, sizeof(dst->grh));
if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) && (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) && diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c index 2f2c7646fce1..a02916a3a79c 100644 --- a/drivers/infiniband/core/uverbs_uapi.c +++ b/drivers/infiniband/core/uverbs_uapi.c @@ -447,6 +447,9 @@ static int uapi_finalize(struct uverbs_api *uapi) uapi->num_write_ex = max_write_ex + 1; data = kmalloc_array(uapi->num_write + uapi->num_write_ex, sizeof(*uapi->write_methods), GFP_KERNEL); + if (!data) + return -ENOMEM; + for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++) data[i] = &uapi->notsupp_method; uapi->write_methods = data; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 6204ae2caef5..bf20a388eabe 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -641,6 +641,7 @@ struct mlx5_ib_mr {
/* User MR data */ struct mlx5_cache_ent *cache_ent; + struct ib_umem *umem;
/* This is zero'd when the MR is allocated */ union { @@ -652,7 +653,7 @@ struct mlx5_ib_mr { struct list_head list; };
- /* Used only by kernel MRs */ + /* Used only by kernel MRs (umem == NULL) */ struct { void *descs; void *descs_alloc; @@ -674,9 +675,8 @@ struct mlx5_ib_mr { int data_length; };
- /* Used only by User MRs */ + /* Used only by User MRs (umem != NULL) */ struct { - struct ib_umem *umem; unsigned int page_shift; /* Current access_flags */ int access_flags; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 69b2ce4c292a..22e2f4d79743 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1911,18 +1911,19 @@ mlx5_alloc_priv_descs(struct ib_device *device, return ret; }
-static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr) +static void +mlx5_free_priv_descs(struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); - int size = mr->max_descs * mr->desc_size; - - if (!mr->descs) - return; + if (!mr->umem && mr->descs) { + struct ib_device *device = mr->ibmr.device; + int size = mr->max_descs * mr->desc_size; + struct mlx5_ib_dev *dev = to_mdev(device);
- dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, - DMA_TO_DEVICE); - kfree(mr->descs_alloc); - mr->descs = NULL; + dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, + DMA_TO_DEVICE); + kfree(mr->descs_alloc); + mr->descs = NULL; + } }
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) @@ -1998,8 +1999,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) if (mr->cache_ent) { mlx5_mr_cache_free(dev, mr); } else { - if (!udata) - mlx5_free_priv_descs(mr); + mlx5_free_priv_descs(mr); kfree(mr); } return 0; @@ -2086,6 +2086,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd, if (err) goto err_free_in;
+ mr->umem = NULL; kfree(in);
return mr; @@ -2212,6 +2213,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd, }
mr->ibmr.device = pd->device; + mr->umem = NULL;
switch (mr_type) { case IB_MR_TYPE_MEM_REG: diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index b8d901099378..1e70b8d2a8d7 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -488,6 +488,15 @@ static int zinitix_ts_probe(struct i2c_client *client) return error; }
+ error = devm_request_threaded_irq(&client->dev, client->irq, + NULL, zinitix_ts_irq_handler, + IRQF_ONESHOT | IRQF_NO_AUTOEN, + client->name, bt541); + if (error) { + dev_err(&client->dev, "Failed to request IRQ: %d\n", error); + return error; + } + error = zinitix_init_input_dev(bt541); if (error) { dev_err(&client->dev, @@ -513,15 +522,6 @@ static int zinitix_ts_probe(struct i2c_client *client) return -EINVAL; }
- error = devm_request_threaded_irq(&client->dev, client->irq, - NULL, zinitix_ts_irq_handler, - IRQF_ONESHOT | IRQF_NO_AUTOEN, - client->name, bt541); - if (error) { - dev_err(&client->dev, "Failed to request IRQ: %d\n", error); - return error; - } - return 0; }
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index 55891e420446..a41b4b264594 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c @@ -381,7 +381,7 @@ mISDNInit(void) err = mISDN_inittimer(&debug); if (err) goto error2; - err = l1_init(&debug); + err = Isdnl1_Init(&debug); if (err) goto error3; err = Isdnl2_Init(&debug); @@ -395,7 +395,7 @@ mISDNInit(void) error5: Isdnl2_cleanup(); error4: - l1_cleanup(); + Isdnl1_cleanup(); error3: mISDN_timer_cleanup(); error2: @@ -408,7 +408,7 @@ static void mISDN_cleanup(void) { misdn_sock_cleanup(); Isdnl2_cleanup(); - l1_cleanup(); + Isdnl1_cleanup(); mISDN_timer_cleanup(); class_unregister(&mISDN_class);
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h index 23b44d303327..42599f49c189 100644 --- a/drivers/isdn/mISDN/core.h +++ b/drivers/isdn/mISDN/core.h @@ -60,8 +60,8 @@ struct Bprotocol *get_Bprotocol4id(u_int); extern int mISDN_inittimer(u_int *); extern void mISDN_timer_cleanup(void);
-extern int l1_init(u_int *); -extern void l1_cleanup(void); +extern int Isdnl1_Init(u_int *); +extern void Isdnl1_cleanup(void); extern int Isdnl2_Init(u_int *); extern void Isdnl2_cleanup(void);
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c index 98a3bc6c1700..7b31c25a550e 100644 --- a/drivers/isdn/mISDN/layer1.c +++ b/drivers/isdn/mISDN/layer1.c @@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) { EXPORT_SYMBOL(create_l1);
int -l1_init(u_int *deb) +Isdnl1_Init(u_int *deb) { debug = deb; l1fsm_s.state_count = L1S_STATE_COUNT; @@ -409,7 +409,7 @@ l1_init(u_int *deb) }
void -l1_cleanup(void) +Isdnl1_cleanup(void) { mISDN_FsmFree(&l1fsm_s); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 6ba12f0f0f03..9fa479493642 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1496,12 +1496,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, if (!r1_bio->bios[i]) continue;
- if (first_clone && test_bit(WriteMostly, &rdev->flags)) { + if (first_clone) { /* do behind I/O ? * Not if there are too many, or cannot * allocate memory, or a reader on WriteMostly * is waiting for behind writes to flush */ if (bitmap && + test_bit(WriteMostly, &rdev->flags) && (atomic_read(&bitmap->behind_writes) < mddev->bitmap_info.max_write_behind) && !waitqueue_active(&bitmap->behind_wait)) { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 0e43000614ab..8f08e0bae300 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1288,26 +1288,22 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) { - struct ena_tx_buffer *tx_info = NULL; + struct ena_tx_buffer *tx_info;
- if (likely(req_id < tx_ring->ring_size)) { - tx_info = &tx_ring->tx_buffer_info[req_id]; - if (likely(tx_info->skb)) - return 0; - } + tx_info = &tx_ring->tx_buffer_info[req_id]; + if (likely(tx_info->skb)) + return 0;
return handle_invalid_req_id(tx_ring, req_id, tx_info, false); }
static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id) { - struct ena_tx_buffer *tx_info = NULL; + struct ena_tx_buffer *tx_info;
- if (likely(req_id < xdp_ring->ring_size)) { - tx_info = &xdp_ring->tx_buffer_info[req_id]; - if (likely(tx_info->xdpf)) - return 0; - } + tx_info = &xdp_ring->tx_buffer_info[req_id]; + if (likely(tx_info->xdpf)) + return 0;
return handle_invalid_req_id(xdp_ring, req_id, tx_info, true); } @@ -1332,9 +1328,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); - if (rc) + if (rc) { + if (unlikely(rc == -EINVAL)) + handle_invalid_req_id(tx_ring, req_id, NULL, + false); break; + }
+ /* validate that the request id points to a valid skb */ rc = validate_tx_req_id(tx_ring, req_id); if (rc) break; @@ -1427,6 +1428,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, u16 *next_to_clean) { struct ena_rx_buffer *rx_info; + struct ena_adapter *adapter; u16 len, req_id, buf = 0; struct sk_buff *skb; void *page_addr; @@ -1439,8 +1441,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(!rx_info->page)) { - netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, - "Page is NULL\n"); + adapter = rx_ring->adapter; + netif_err(adapter, rx_err, rx_ring->netdev, + "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); + ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); + adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + /* Make sure reset reason is set before triggering the reset */ + smp_mb__before_atomic(); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); return NULL; }
@@ -1896,9 +1904,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq, &req_id); - if (rc) + if (rc) { + if (unlikely(rc == -EINVAL)) + handle_invalid_req_id(xdp_ring, req_id, NULL, + true); break; + }
+ /* validate that the request id points to a valid xdp_frame */ rc = validate_xdp_req_id(xdp_ring, req_id); if (rc) break; @@ -4013,10 +4026,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num); /* 1 IRQ for mgmnt and 1 IRQs for each IO direction */ max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); - if (unlikely(!max_num_io_queues)) { - dev_err(&pdev->dev, "The device doesn't have io queues\n"); - return -EFAULT; - }
return max_num_io_queues; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 24122ccda614..72f8751784c3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -365,6 +365,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, if (!buff->is_eop) { buff_ = buff; do { + if (buff_->next >= self->size) { + err = -EIO; + goto err_exit; + } next_ = buff_->next, buff_ = &self->buff_ring[next_]; is_rsc_completed = @@ -388,6 +392,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, (buff->is_lro && buff->is_cso_err)) { buff_ = buff; do { + if (buff_->next >= self->size) { + err = -EIO; + goto err_exit; + } next_ = buff_->next, buff_ = &self->buff_ring[next_];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 76d0b809d134..cc1cefdd4cda 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -99,6 +99,24 @@ MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, + struct net_device *netdev, int delta) +{ + struct netdev_hw_addr *ha; + + if (!f || !netdev) + return; + + netdev_for_each_mc_addr(ha, netdev) { + if (ether_addr_equal(ha->addr, f->macaddr)) { + ha->refcount += delta; + if (ha->refcount <= 0) + ha->refcount = 1; + break; + } + } +} + /** * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -2036,6 +2054,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, hlist_for_each_entry_safe(new, h, from, hlist) { /* We can simply free the wrapper structure */ hlist_del(&new->hlist); + netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); kfree(new); } } @@ -2383,6 +2402,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) &tmp_add_list, &tmp_del_list, vlan_filters); + + hlist_for_each_entry(new, &tmp_add_list, hlist) + netdev_hw_addr_refcnt(new->f, vsi->netdev, 1); + if (retval) goto err_no_memory_locked;
@@ -2515,6 +2538,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) if (new->f->state == I40E_FILTER_NEW) new->f->state = new->state; hlist_del(&new->hlist); + netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); kfree(new); } spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -8716,6 +8740,27 @@ int i40e_open(struct net_device *netdev) return 0; }
+/** + * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues + * @vsi: vsi structure + * + * This updates netdev's number of tx/rx queues + * + * Returns status of setting tx/rx queues + **/ +static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi) +{ + int ret; + + ret = netif_set_real_num_rx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (ret) + return ret; + + return netif_set_real_num_tx_queues(vsi->netdev, + vsi->num_queue_pairs); +} + /** * i40e_vsi_open - * @vsi: the VSI to open @@ -8752,13 +8797,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi) goto err_setup_rx;
/* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(vsi->netdev, - vsi->num_queue_pairs); - if (err) - goto err_set_queues; - - err = netif_set_real_num_rx_queues(vsi->netdev, - vsi->num_queue_pairs); + err = i40e_netif_set_realnum_tx_rx_queues(vsi); if (err) goto err_set_queues;
@@ -14149,6 +14188,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, case I40E_VSI_MAIN: case I40E_VSI_VMDQ2: ret = i40e_config_netdev(vsi); + if (ret) + goto err_netdev; + ret = i40e_netif_set_realnum_tx_rx_queues(vsi); if (ret) goto err_netdev; ret = register_netdev(vsi->netdev); @@ -15451,8 +15493,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) - dev_info(&pdev->dev, - "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n", + dev_dbg(&pdev->dev, + "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 2ea4deb8fc44..048f1678ab8a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1877,17 +1877,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) /***********************virtual channel routines******************/
/** - * i40e_vc_send_msg_to_vf + * i40e_vc_send_msg_to_vf_ex * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length + * @is_quiet: true for not printing unsuccessful return values, false otherwise * * send msg to VF **/ -static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) +static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen, + bool is_quiet) { struct i40e_pf *pf; struct i40e_hw *hw; @@ -1903,7 +1905,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* single place to detect unsuccessful return values */ - if (v_retval) { + if (v_retval && !is_quiet) { vf->num_invalid_msgs++; dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, v_opcode, v_retval); @@ -1933,6 +1935,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, return 0; }
+/** + * i40e_vc_send_msg_to_vf + * @vf: pointer to the VF info + * @v_opcode: virtual channel opcode + * @v_retval: virtual channel return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send msg to VF + **/ +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, + msg, msglen, false); +} + /** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info @@ -2695,6 +2714,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) * i40e_check_vf_permission * @vf: pointer to the VF info * @al: MAC address list from virtchnl + * @is_quiet: set true for printing msg without opcode info, false otherwise * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. Checks the following conditions: @@ -2709,13 +2729,15 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) * addresses might not be accurate. **/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, - struct virtchnl_ether_addr_list *al) + struct virtchnl_ether_addr_list *al, + bool *is_quiet) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; int mac2add_cnt = 0; int i;
+ *is_quiet = false; for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; @@ -2739,6 +2761,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + *is_quiet = true; return -EPERM; }
@@ -2775,6 +2798,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; + bool is_quiet = false; i40e_status ret = 0; int i;
@@ -2791,7 +2815,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) */ spin_lock_bh(&vsi->mac_filter_hash_lock);
- ret = i40e_check_vf_permission(vf, al); + ret = i40e_check_vf_permission(vf, al, &is_quiet); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2829,8 +2853,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, - ret); + return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0, is_quiet); }
/** diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 4f3b025daa14..6502c8056a8e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -2652,8 +2652,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > IAVF_MAX_REQ_QUEUES) + if (num_qps > adapter->num_active_queues) { + dev_err(&adapter->pdev->dev, + "Cannot support requested number of queues\n"); return -EINVAL; + }
ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); return ret; diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 11a6aee852e9..0c6cc2191369 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue) struct ef4_rx_page_state *state; unsigned index;
+ if (unlikely(!rx_queue->page_ring)) + return NULL; index = rx_queue->page_remove & rx_queue->page_ptr_mask; page = rx_queue->page_ring[index]; if (page == NULL) @@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel, { struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
+ if (unlikely(!rx_queue->page_ring)) + return; + do { ef4_recycle_rx_page(channel, rx_buf); rx_buf = ef4_rx_buf_next(rx_queue, rx_buf); diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 0983abc0cc5f..633ca77a26fd 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) unsigned int index; struct page *page;
+ if (unlikely(!rx_queue->page_ring)) + return NULL; index = rx_queue->page_remove & rx_queue->page_ptr_mask; page = rx_queue->page_ring[index]; if (page == NULL) @@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel, { struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ if (unlikely(!rx_queue->page_ring)) + return; + do { efx_recycle_rx_page(channel, rx_buf); rx_buf = efx_rx_buf_next(rx_queue, rx_buf); diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 23ee0b14cbfa..2f5e7b31032a 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
ret = usb_control_msg(usb_dev, pipe, request, requesttype, value, index, data, size, timeout); - if (ret < 0) { + if (ret < size) { + ret = ret < 0 ? ret : -ENODATA; + atusb->err = ret; dev_err(&usb_dev->dev, "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n", @@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb) if (!build) return -ENOMEM;
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), - ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, - build, ATUSB_BUILD_SIZE, 1000); + /* We cannot call atusb_control_msg() here, since this request may read various length data */ + ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD, + ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000); if (ret >= 0) { build[ret] = 0; dev_info(&usb_dev->dev, "Firmware: build %s\n", build); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c08e0857e8a7..d467a9f3bb44 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -9638,9 +9638,12 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->hw_features &= ~NETIF_F_RXCSUM; }
- if (udev->parent && - le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) { - tp->lenovo_macpassthru = 1; + if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) { + switch (le16_to_cpu(udev->descriptor.idProduct)) { + case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: + case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: + tp->lenovo_macpassthru = 1; + } }
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 85a8b96e39a6..bedd36ab5cf0 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -608,6 +608,11 @@ static const struct usb_device_id products [] = { USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042, USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_poll_status_info, +}, { + /* Hytera Communications DMR radios' "Radio to PC Network" */ + USB_VENDOR_AND_INTERFACE_INFO(0x238b, + USB_CLASS_COMM, 2 /* ACM */, 0x0ff), + .driver_info = (unsigned long)&rndis_info, }, { /* RNDIS is MSFT's un-official variant of CDC ACM */ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c index 8688c8ba8894..81be33c041d6 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -161,8 +161,8 @@ static void ltc2952_poweroff_kill(void)
static void ltc2952_poweroff_default(struct ltc2952_poweroff *data) { - data->wde_interval = 300L * 1E6L; - data->trigger_delay = ktime_set(2, 500L*1E6L); + data->wde_interval = 300L * NSEC_PER_MSEC; + data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC);
hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL); data->timer_trigger.function = ltc2952_poweroff_timer_trigger; diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 945c3257ca93..fe814805c68b 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -581,12 +581,12 @@ static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq)
if (!new_state.online && bq->state.online) { /* power removed */ /* disable ADC */ - ret = bq25890_field_write(bq, F_CONV_START, 0); + ret = bq25890_field_write(bq, F_CONV_RATE, 0); if (ret < 0) goto error; } else if (new_state.online && !bq->state.online) { /* power inserted */ /* enable ADC, to have control of charge current/voltage */ - ret = bq25890_field_write(bq, F_CONV_START, 1); + ret = bq25890_field_write(bq, F_CONV_RATE, 1); if (ret < 0) goto error; } diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 0c2132c7f5d4..a6e9afa5a1cf 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -853,6 +853,10 @@ power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, return NULL;
for (i = 0; i < POWER_SUPPLY_OCV_TEMP_MAX; i++) { + /* Out of capacity tables */ + if (!info->ocv_table[i]) + break; + temp_diff = abs(info->ocv_temp[i] - temp);
if (temp_diff < best_temp_diff) { diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c index e0704fd2b533..1e8315038850 100644 --- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c +++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c @@ -137,7 +137,12 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) dev_set_drvdata(dev, priv);
pm_runtime_enable(&pdev->dev); - pm_runtime_resume_and_get(&pdev->dev); + error = pm_runtime_resume_and_get(&pdev->dev); + if (error < 0) { + pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); + return dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed"); + }
/* put pll and phy into reset state */ spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 5bc91d34df63..cbc263ec9d66 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3101,6 +3101,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; + char *tmp_persistent_address = conn->persistent_address; + char *tmp_local_ipaddr = conn->local_ipaddr;
del_timer_sync(&conn->transport_timer);
@@ -3122,8 +3124,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) spin_lock_bh(&session->frwd_lock); free_pages((unsigned long) conn->data, get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); - kfree(conn->persistent_address); - kfree(conn->local_ipaddr); /* regular RX path uses back_lock */ spin_lock_bh(&session->back_lock); kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, @@ -3135,6 +3135,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn); + kfree(tmp_persistent_address); + kfree(tmp_local_ipaddr); } EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c index 0b21da4ee183..9977600616d7 100644 --- a/drivers/usb/mtu3/mtu3_gadget.c +++ b/drivers/usb/mtu3/mtu3_gadget.c @@ -77,7 +77,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep) if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { interval = desc->bInterval; - interval = clamp_val(interval, 1, 16) - 1; + interval = clamp_val(interval, 1, 16); if (usb_endpoint_xfer_isoc(desc) && comp_desc) mult = comp_desc->bmAttributes; } @@ -89,7 +89,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep) if (usb_endpoint_xfer_isoc(desc) || usb_endpoint_xfer_int(desc)) { interval = desc->bInterval; - interval = clamp_val(interval, 1, 16) - 1; + interval = clamp_val(interval, 1, 16); mult = usb_endpoint_maxp_mult(desc) - 1; } break; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 7420d2c16e47..7bd5e2a4a9da 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1759,6 +1759,53 @@ int remove_conflicting_framebuffers(struct apertures_struct *a, } EXPORT_SYMBOL(remove_conflicting_framebuffers);
+/** + * is_firmware_framebuffer - detect if firmware-configured framebuffer matches + * @a: memory range, users of which are to be checked + * + * This function checks framebuffer devices (initialized by firmware/bootloader) + * which use memory range described by @a. If @a matches, the function returns + * true, otherwise false. + */ +bool is_firmware_framebuffer(struct apertures_struct *a) +{ + bool do_free = false; + bool found = false; + int i; + + if (!a) { + a = alloc_apertures(1); + if (!a) + return false; + + a->ranges[0].base = 0; + a->ranges[0].size = ~0; + do_free = true; + } + + mutex_lock(&registration_lock); + /* check all firmware fbs and kick off if the base addr overlaps */ + for_each_registered_fb(i) { + struct apertures_struct *gen_aper; + + if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE)) + continue; + + gen_aper = registered_fb[i]->apertures; + if (fb_do_apertures_overlap(gen_aper, a)) { + found = true; + break; + } + } + mutex_unlock(&registration_lock); + + if (do_free) + kfree(a); + + return found; +} +EXPORT_SYMBOL(is_firmware_framebuffer); + /** + * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices + * @pdev: PCI device diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 0c795dc093ef..09269f478df9 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -687,7 +687,8 @@ xfs_ioc_space(
if (bf->l_start > XFS_ISIZE(ip)) { error = xfs_alloc_file_space(ip, XFS_ISIZE(ip), - bf->l_start - XFS_ISIZE(ip), 0); + bf->l_start - XFS_ISIZE(ip), + XFS_BMAPI_PREALLOC); if (error) goto out_unlock; } diff --git a/include/linux/fb.h b/include/linux/fb.h index 5950f8f5dc74..02f362c661c8 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -610,6 +610,7 @@ extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name); extern int remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary); +extern bool is_firmware_framebuffer(struct apertures_struct *a); extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); extern int fb_show_logo(struct fb_info *fb_info, int rotate); extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index a4dab5998613..3b2282c157f7 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -167,7 +167,7 @@ struct fscache_cookie {
static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie) { - return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); + return fscache_cookie_valid(cookie) && test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); }
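The fscache change guards the ENABLED flag test behind fscache_cookie_valid(), so an invalid cookie is treated as disabled instead of being dereferenced. A tiny sketch of the short-circuit guard pattern (hypothetical types, not the fscache API):

#include <stdbool.h>
#include <stdio.h>

struct cookie {
        unsigned long flags;
};

#define COOKIE_ENABLED 0x1UL

static bool cookie_enabled(const struct cookie *c)
{
        /* && short-circuits, so a NULL cookie is never dereferenced */
        return c && (c->flags & COOKIE_ENABLED);
}

int main(void)
{
        struct cookie on = { .flags = COOKIE_ENABLED };

        printf("enabled: %d, NULL: %d\n", cookie_enabled(&on), cookie_enabled(NULL));
        return 0;
}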
/* diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index d314a180ab93..3ae61ce2eabd 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -112,8 +112,7 @@ struct sctp_transport *sctp_transport_get_next(struct net *net, struct rhashtable_iter *iter); struct sctp_transport *sctp_transport_get_idx(struct net *net, struct rhashtable_iter *iter, int pos); -int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), - struct net *net, +int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr, void *p); int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done, diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index bfbeabc17a9d..6e36e854b512 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -65,6 +65,25 @@ static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc) return container_of(kfc, struct cgroup_fs_context, kfc); }
+struct cgroup_pidlist; + +struct cgroup_file_ctx { + struct cgroup_namespace *ns; + + struct { + void *trigger; + } psi; + + struct { + bool started; + struct css_task_iter iter; + } procs; + + struct { + struct cgroup_pidlist *pidlist; + } procs1; +}; + /* * A cgroup can be associated with multiple css_sets as different tasks may * belong to different cgroups on different hierarchies. In the other diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 35b920328344..9537443de22d 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -397,6 +397,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) * next pid to display, if any */ struct kernfs_open_file *of = s->private; + struct cgroup_file_ctx *ctx = of->priv; struct cgroup *cgrp = seq_css(s)->cgroup; struct cgroup_pidlist *l; enum cgroup_filetype type = seq_cft(s)->private; @@ -406,25 +407,24 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) mutex_lock(&cgrp->pidlist_mutex);
/* - * !NULL @of->priv indicates that this isn't the first start() - * after open. If the matching pidlist is around, we can use that. - * Look for it. Note that @of->priv can't be used directly. It - * could already have been destroyed. + * !NULL @ctx->procs1.pidlist indicates that this isn't the first + * start() after open. If the matching pidlist is around, we can use + * that. Look for it. Note that @ctx->procs1.pidlist can't be used + * directly. It could already have been destroyed. */ - if (of->priv) - of->priv = cgroup_pidlist_find(cgrp, type); + if (ctx->procs1.pidlist) + ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);
/* * Either this is the first start() after open or the matching * pidlist has been destroyed inbetween. Create a new one. */ - if (!of->priv) { - ret = pidlist_array_load(cgrp, type, - (struct cgroup_pidlist **)&of->priv); + if (!ctx->procs1.pidlist) { + ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist); if (ret) return ERR_PTR(ret); } - l = of->priv; + l = ctx->procs1.pidlist;
if (pid) { int end = l->length; @@ -452,7 +452,8 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) static void cgroup_pidlist_stop(struct seq_file *s, void *v) { struct kernfs_open_file *of = s->private; - struct cgroup_pidlist *l = of->priv; + struct cgroup_file_ctx *ctx = of->priv; + struct cgroup_pidlist *l = ctx->procs1.pidlist;
if (l) mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, @@ -463,7 +464,8 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v) static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) { struct kernfs_open_file *of = s->private; - struct cgroup_pidlist *l = of->priv; + struct cgroup_file_ctx *ctx = of->priv; + struct cgroup_pidlist *l = ctx->procs1.pidlist; pid_t *p = v; pid_t *end = l->list + l->length; /* @@ -507,10 +509,11 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of, goto out_unlock;
/* - * Even if we're attaching all tasks in the thread group, we only - * need to check permissions on one of them. + * Even if we're attaching all tasks in the thread group, we only need + * to check permissions on one of them. Check permissions using the + * credentials from file open to protect against inherited fd attacks. */ - cred = current_cred(); + cred = of->file->f_cred; tcred = get_task_cred(task); if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && !uid_eq(cred->euid, tcred->uid) && diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index d6ea872b23aa..bb1a78ff1437 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -3630,6 +3630,7 @@ static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v) static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, size_t nbytes, enum psi_res res) { + struct cgroup_file_ctx *ctx = of->priv; struct psi_trigger *new; struct cgroup *cgrp; struct psi_group *psi; @@ -3648,7 +3649,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, return PTR_ERR(new); }
- psi_trigger_replace(&of->priv, new); + psi_trigger_replace(&ctx->psi.trigger, new);
cgroup_put(cgrp);
@@ -3679,12 +3680,16 @@ static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of, static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of, poll_table *pt) { - return psi_trigger_poll(&of->priv, of->file, pt); + struct cgroup_file_ctx *ctx = of->priv; + + return psi_trigger_poll(&ctx->psi.trigger, of->file, pt); }
static void cgroup_pressure_release(struct kernfs_open_file *of) { - psi_trigger_replace(&of->priv, NULL); + struct cgroup_file_ctx *ctx = of->priv; + + psi_trigger_replace(&ctx->psi.trigger, NULL); }
bool cgroup_psi_enabled(void) @@ -3811,24 +3816,43 @@ static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf, static int cgroup_file_open(struct kernfs_open_file *of) { struct cftype *cft = of_cft(of); + struct cgroup_file_ctx *ctx; + int ret;
- if (cft->open) - return cft->open(of); - return 0; + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->ns = current->nsproxy->cgroup_ns; + get_cgroup_ns(ctx->ns); + of->priv = ctx; + + if (!cft->open) + return 0; + + ret = cft->open(of); + if (ret) { + put_cgroup_ns(ctx->ns); + kfree(ctx); + } + return ret; }
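The cgroup_file_open()/cgroup_file_release() rework above gives every open file its own cgroup_file_ctx, allocated at open, released at close, and unwound if the per-file open hook fails. A minimal userspace sketch of that lifetime pattern (all names are illustrative, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct file_ctx {
        int ns_id;                      /* stands in for the captured namespace */
};

struct open_file {
        void *priv;
        int (*open_hook)(struct open_file *of);
};

static int file_open(struct open_file *of, int current_ns)
{
        struct file_ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ctx)
                return -1;

        ctx->ns_id = current_ns;        /* capture state at open time */
        of->priv = ctx;

        if (of->open_hook && of->open_hook(of)) {
                free(ctx);              /* undo the allocation on hook failure */
                of->priv = NULL;
                return -1;
        }
        return 0;
}

static void file_release(struct open_file *of)
{
        free(of->priv);
        of->priv = NULL;
}

int main(void)
{
        struct open_file of = { 0 };

        if (file_open(&of, 42) == 0) {
                printf("ns captured at open: %d\n",
                       ((struct file_ctx *)of.priv)->ns_id);
                file_release(&of);
        }
        return 0;
}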
static void cgroup_file_release(struct kernfs_open_file *of) { struct cftype *cft = of_cft(of); + struct cgroup_file_ctx *ctx = of->priv;
if (cft->release) cft->release(of); + put_cgroup_ns(ctx->ns); + kfree(ctx); }
static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { - struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; + struct cgroup_file_ctx *ctx = of->priv; struct cgroup *cgrp = of->kn->parent->priv; struct cftype *cft = of_cft(of); struct cgroup_subsys_state *css; @@ -3845,7 +3869,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, */ if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) && !(cft->flags & CFTYPE_NS_DELEGATABLE) && - ns != &init_cgroup_ns && ns->root_cset->dfl_cgrp == cgrp) + ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp) return -EPERM;
if (cft->write) @@ -4751,21 +4775,21 @@ void css_task_iter_end(struct css_task_iter *it)
static void cgroup_procs_release(struct kernfs_open_file *of) { - if (of->priv) { - css_task_iter_end(of->priv); - kfree(of->priv); - } + struct cgroup_file_ctx *ctx = of->priv; + + if (ctx->procs.started) + css_task_iter_end(&ctx->procs.iter); }
static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos) { struct kernfs_open_file *of = s->private; - struct css_task_iter *it = of->priv; + struct cgroup_file_ctx *ctx = of->priv;
if (pos) (*pos)++;
- return css_task_iter_next(it); + return css_task_iter_next(&ctx->procs.iter); }
static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos, @@ -4773,21 +4797,18 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos, { struct kernfs_open_file *of = s->private; struct cgroup *cgrp = seq_css(s)->cgroup; - struct css_task_iter *it = of->priv; + struct cgroup_file_ctx *ctx = of->priv; + struct css_task_iter *it = &ctx->procs.iter;
/* * When a seq_file is seeked, it's always traversed sequentially * from position 0, so we can simply keep iterating on !0 *pos. */ - if (!it) { + if (!ctx->procs.started) { if (WARN_ON_ONCE((*pos))) return ERR_PTR(-EINVAL); - - it = kzalloc(sizeof(*it), GFP_KERNEL); - if (!it) - return ERR_PTR(-ENOMEM); - of->priv = it; css_task_iter_start(&cgrp->self, iter_flags, it); + ctx->procs.started = true; } else if (!(*pos)) { css_task_iter_end(it); css_task_iter_start(&cgrp->self, iter_flags, it); @@ -4838,9 +4859,9 @@ static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
static int cgroup_procs_write_permission(struct cgroup *src_cgrp, struct cgroup *dst_cgrp, - struct super_block *sb) + struct super_block *sb, + struct cgroup_namespace *ns) { - struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; struct cgroup *com_cgrp = src_cgrp; int ret;
@@ -4869,11 +4890,12 @@ static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
static int cgroup_attach_permissions(struct cgroup *src_cgrp, struct cgroup *dst_cgrp, - struct super_block *sb, bool threadgroup) + struct super_block *sb, bool threadgroup, + struct cgroup_namespace *ns) { int ret = 0;
- ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb); + ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns); if (ret) return ret;
@@ -4890,8 +4912,10 @@ static int cgroup_attach_permissions(struct cgroup *src_cgrp, static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, bool threadgroup) { + struct cgroup_file_ctx *ctx = of->priv; struct cgroup *src_cgrp, *dst_cgrp; struct task_struct *task; + const struct cred *saved_cred; ssize_t ret; bool locked;
@@ -4909,9 +4933,16 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); spin_unlock_irq(&css_set_lock);
- /* process and thread migrations follow same delegation rule */ + /* + * Process and thread migrations follow same delegation rule. Check + * permissions using the credentials from file open to protect against + * inherited fd attacks. + */ + saved_cred = override_creds(of->file->f_cred); ret = cgroup_attach_permissions(src_cgrp, dst_cgrp, - of->file->f_path.dentry->d_sb, threadgroup); + of->file->f_path.dentry->d_sb, + threadgroup, ctx->ns); + revert_creds(saved_cred); if (ret) goto out_finish;
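Several hunks above switch permission checks from current_cred() to of->file->f_cred, i.e. the credentials captured when the file was opened, so migrations are authorized against whoever opened the file rather than whoever happens to hold an inherited fd. A userspace illustration of checking against the recorded opener instead of the caller at write time (not the kernel credential API):

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

struct handle {
        uid_t opener_uid;               /* captured once, at open */
};

static void handle_open(struct handle *h)
{
        h->opener_uid = getuid();
}

static int handle_write(const struct handle *h, uid_t target_uid)
{
        /* decide based on the opener, not on getuid() at write time */
        if (h->opener_uid != 0 && h->opener_uid != target_uid)
                return -1;              /* would be -EPERM in the kernel */
        return 0;
}

int main(void)
{
        struct handle h;

        handle_open(&h);
        printf("write targeting opener: %d\n", handle_write(&h, getuid()));
        printf("write targeting uid 0:  %d\n", handle_write(&h, 0));
        return 0;
}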
@@ -6127,7 +6158,8 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs) goto err;
ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb, - !(kargs->flags & CLONE_THREAD)); + !(kargs->flags & CLONE_THREAD), + current->nsproxy->cgroup_ns); if (ret) goto err;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 18db461f77cd..ce05ba041288 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3231,7 +3231,7 @@ struct trace_buffer_struct { char buffer[4][TRACE_BUF_SIZE]; };
-static struct trace_buffer_struct *trace_percpu_buffer; +static struct trace_buffer_struct __percpu *trace_percpu_buffer;
/* * This allows for lockless recording. If we're nested too deeply, then @@ -3241,7 +3241,7 @@ static char *get_trace_buf(void) { struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
- if (!buffer || buffer->nesting >= 4) + if (!trace_percpu_buffer || buffer->nesting >= 4) return NULL;
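In the trace.c hunk above the NULL check moves from the result of this_cpu_ptr() to trace_percpu_buffer itself, and the pointer gains a __percpu annotation; this_cpu_ptr() adds a per-CPU offset to the base, so its result looks non-NULL even when nothing was allocated. A userspace sketch with a stand-in macro (not the real per-cpu implementation):

#include <stdio.h>
#include <stdint.h>

#define FAKE_PER_CPU_OFFSET 0x1000UL
#define fake_this_cpu_ptr(base) \
        ((void *)((uintptr_t)(base) + FAKE_PER_CPU_OFFSET))

int main(void)
{
        void *trace_percpu_buffer = NULL;          /* not allocated yet */
        void *buffer = fake_this_cpu_ptr(trace_percpu_buffer);

        printf("buffer = %p (non-NULL even though the base is NULL)\n", buffer);
        printf("check the base instead: %s\n",
               trace_percpu_buffer ? "allocated" : "not allocated");
        return 0;
}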
buffer->nesting++; @@ -3260,7 +3260,7 @@ static void put_trace_buf(void)
static int alloc_percpu_trace_buffer(void) { - struct trace_buffer_struct *buffers; + struct trace_buffer_struct __percpu *buffers;
if (trace_percpu_buffer) return 0; diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index a3b6658ed789..6e3419beca09 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -1339,6 +1339,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @skb: The multicast packet to check * @orig: an originator to be set to forward the skb to + * @is_routable: stores whether the destination is routable * * Return: the forwarding mode as enum batadv_forw_mode and in case of * BATADV_FORW_SINGLE set the orig to the single originator the skb @@ -1346,17 +1347,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv, */ enum batadv_forw_mode batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, - struct batadv_orig_node **orig) + struct batadv_orig_node **orig, int *is_routable) { int ret, tt_count, ip_count, unsnoop_count, total_count; bool is_unsnoopable = false; unsigned int mcast_fanout; struct ethhdr *ethhdr; - int is_routable = 0; int rtr_count = 0;
ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable, - &is_routable); + is_routable); if (ret == -ENOMEM) return BATADV_FORW_NONE; else if (ret < 0) @@ -1369,7 +1369,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr); unsnoop_count = !is_unsnoopable ? 0 : atomic_read(&bat_priv->mcast.num_want_all_unsnoopables); - rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable); + rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
total_count = tt_count + ip_count + unsnoop_count + rtr_count;
@@ -1689,6 +1689,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier + * @is_routable: stores whether the destination is routable * * Sends copies of a frame with multicast destination to any node that signaled * interest in it, that is either via the translation table or the according @@ -1701,7 +1702,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv, * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise. */ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid) + unsigned short vid, int is_routable) { int ret;
@@ -1717,12 +1718,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, return ret; }
+ if (!is_routable) + goto skip_mc_router; + ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid); if (ret != NET_XMIT_SUCCESS) { kfree_skb(skb); return ret; }
+skip_mc_router: consume_skb(skb); return ret; } diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h index 9fee5da08311..8aec818d0bf6 100644 --- a/net/batman-adv/multicast.h +++ b/net/batman-adv/multicast.h @@ -43,7 +43,8 @@ enum batadv_forw_mode {
enum batadv_forw_mode batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, - struct batadv_orig_node **mcast_single_orig); + struct batadv_orig_node **mcast_single_orig, + int *is_routable);
int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, struct sk_buff *skb, @@ -51,7 +52,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node);
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid); + unsigned short vid, int is_routable);
void batadv_mcast_init(struct batadv_priv *bat_priv);
@@ -68,7 +69,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
static inline enum batadv_forw_mode batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, - struct batadv_orig_node **mcast_single_orig) + struct batadv_orig_node **mcast_single_orig, + int *is_routable) { return BATADV_FORW_ALL; } @@ -85,7 +87,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
static inline int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid) + unsigned short vid, int is_routable) { kfree_skb(skb); return NET_XMIT_DROP; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 0604b0279573..6ab28b509d4b 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -198,6 +198,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, int gw_mode; enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE; struct batadv_orig_node *mcast_single_orig = NULL; + int mcast_is_routable = 0; int network_offset = ETH_HLEN; __be16 proto;
@@ -300,7 +301,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, send: if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) { forw_mode = batadv_mcast_forw_mode(bat_priv, skb, - &mcast_single_orig); + &mcast_single_orig, + &mcast_is_routable); if (forw_mode == BATADV_FORW_NONE) goto dropped;
@@ -359,7 +361,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid, mcast_single_orig); } else if (forw_mode == BATADV_FORW_SOME) { - ret = batadv_mcast_forw_send(bat_priv, skb, vid); + ret = batadv_mcast_forw_send(bat_priv, skb, vid, + mcast_is_routable); } else { if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index 2820aca2173a..9ccd64e8a666 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -197,6 +197,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla_entype) { + if (nla_len(nla_entype) < sizeof(u16)) { + NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE"); + return -EINVAL; + } encap_type = nla_get_u16(nla_entype);
if (lwtunnel_valid_encap_type(encap_type, diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index fde7797b5806..92c29ab3d042 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -662,6 +662,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining, return nhs; }
+static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + if (nla_len(nla) < sizeof(*gw)) { + NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY"); + return -EINVAL; + } + + *gw = nla_get_in_addr(nla); + + return 0; +} + /* only called when fib_nh is integrated into fib_info */ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, int remaining, struct fib_config *cfg, @@ -704,7 +717,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, return -EINVAL; } if (nla) { - fib_cfg.fc_gw4 = nla_get_in_addr(nla); + ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla, + extack); + if (ret) + goto errout; + if (fib_cfg.fc_gw4) fib_cfg.fc_gw_family = AF_INET; } else if (nlav) { @@ -714,10 +731,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, }
nla = nla_find(attrs, attrlen, RTA_FLOW); - if (nla) + if (nla) { + if (nla_len(nla) < sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW"); + return -EINVAL; + } fib_cfg.fc_flow = nla_get_u32(nla); + }
fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); + /* RTA_ENCAP_TYPE length checked in + * lwtunnel_valid_encap_type_attr + */ nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); if (nla) fib_cfg.fc_encap_type = nla_get_u16(nla); @@ -902,6 +927,7 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi, attrlen = rtnh_attrlen(rtnh); if (attrlen > 0) { struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh); + int err;
nla = nla_find(attrs, attrlen, RTA_GATEWAY); nlav = nla_find(attrs, attrlen, RTA_VIA); @@ -912,12 +938,17 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi, }
if (nla) { + __be32 gw; + + err = fib_gw_from_attr(&gw, nla, extack); + if (err) + return err; + if (nh->fib_nh_gw_family != AF_INET || - nla_get_in_addr(nla) != nh->fib_nh_gw4) + gw != nh->fib_nh_gw4) return 1; } else if (nlav) { struct fib_config cfg2; - int err;
err = fib_gw_from_via(&cfg2, nlav, extack); if (err) @@ -940,8 +971,14 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
#ifdef CONFIG_IP_ROUTE_CLASSID nla = nla_find(attrs, attrlen, RTA_FLOW); - if (nla && nla_get_u32(nla) != nh->nh_tclassid) - return 1; + if (nla) { + if (nla_len(nla) < sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW"); + return -EINVAL; + } + if (nla_get_u32(nla) != nh->nh_tclassid) + return 1; + } #endif }
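The fib_semantics.c hunks above (like the lwtunnel and ipv6 route ones) validate nla_len() before calling nla_get_u32()/nla_get_in_addr(), so a truncated RTA_GATEWAY or RTA_FLOW attribute is rejected instead of being read past its end. A self-contained sketch of the same length-check-then-copy pattern, with plain structs standing in for netlink attributes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>

struct attr {
        const void *data;
        size_t len;
};

static int get_u32_attr(const struct attr *a, uint32_t *out)
{
        if (a->len < sizeof(*out))
                return -EINVAL;         /* "Invalid RTA_FLOW"-style rejection */

        memcpy(out, a->data, sizeof(*out));
        return 0;
}

int main(void)
{
        uint32_t flow = 0x1234;
        uint8_t truncated[2] = { 0xab, 0xcd };
        struct attr good = { &flow, sizeof(flow) };
        struct attr bad = { truncated, sizeof(truncated) };
        uint32_t val;
        int ret;

        ret = get_u32_attr(&good, &val);
        printf("good: %d (val=0x%x)\n", ret, (unsigned int)val);
        printf("bad:  %d\n", get_u32_attr(&bad, &val));
        return 0;
}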
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index be07e3d2b77b..835b9d6e4e68 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -3076,7 +3076,7 @@ int udp4_seq_show(struct seq_file *seq, void *v) { seq_setwidth(seq, 127); if (v == SEQ_START_TOKEN) - seq_puts(seq, " sl local_address rem_address st tx_queue " + seq_puts(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops"); else { diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 1d8e3ffa225d..42c37ec832f1 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -808,6 +808,8 @@ vti6_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data struct net *net = dev_net(dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ memset(&p1, 0, sizeof(p1)); + switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 60f1e4f5be5a..c51d5ce3711c 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1020,6 +1020,9 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, struct raw6_sock *rp = raw6_sk(sk); int val;
+ if (optlen < sizeof(val)) + return -EINVAL; + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT;
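The raw.c change above rejects setsockopt() calls whose optlen is smaller than the integer being read before copy_from_sockptr() runs. A userspace sketch of the same validation, with memcpy standing in for the sockptr copy:

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int set_option(const void *optval, size_t optlen)
{
        int val;

        if (optlen < sizeof(val))
                return -EINVAL;          /* too short: do not read 4 bytes */

        memcpy(&val, optval, sizeof(val));
        printf("val = %d\n", val);
        return 0;
}

int main(void)
{
        char short_buf[2] = { 1, 0 };
        int full = 42;

        printf("short buffer -> %d\n", set_option(short_buf, sizeof(short_buf)));
        printf("full value   -> %d\n", set_option(&full, sizeof(full)));
        return 0;
}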
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 79cb5e5a4948..0632382a5427 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -5224,6 +5224,19 @@ static bool ip6_route_mpath_should_notify(const struct fib6_info *rt) return should_notify; }
+static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + if (nla_len(nla) < sizeof(*gw)) { + NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY"); + return -EINVAL; + } + + *gw = nla_get_in6_addr(nla); + + return 0; +} + static int ip6_route_multipath_add(struct fib6_config *cfg, struct netlink_ext_ack *extack) { @@ -5264,10 +5277,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
nla = nla_find(attrs, attrlen, RTA_GATEWAY); if (nla) { - r_cfg.fc_gateway = nla_get_in6_addr(nla); + err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla, + extack); + if (err) + goto cleanup; + r_cfg.fc_flags |= RTF_GATEWAY; } r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); + + /* RTA_ENCAP_TYPE length checked in + * lwtunnel_valid_encap_type_attr + */ nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); if (nla) r_cfg.fc_encap_type = nla_get_u16(nla); @@ -5434,7 +5455,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
nla = nla_find(attrs, attrlen, RTA_GATEWAY); if (nla) { - nla_memcpy(&r_cfg.fc_gateway, nla, 16); + err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla, + extack); + if (err) { + last_err = err; + goto next_rtnh; + } + r_cfg.fc_flags |= RTF_GATEWAY; } } @@ -5442,6 +5469,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg, if (err) last_err = err;
+next_rtnh: rtnh = rtnh_next(rtnh, &remaining); }
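The ip6_route_multipath_del() hunk above records the error for a malformed gateway attribute and jumps to next_rtnh so the remaining nexthops are still processed. A small sketch of that record-and-continue loop (the entries here are hypothetical):

#include <stdio.h>
#include <errno.h>

static int delete_entry(int v)
{
        return (v < 0) ? -EINVAL : 0;   /* pretend negative entries are malformed */
}

int main(void)
{
        int entries[] = { 1, -2, 3, -4, 5 };
        int last_err = 0;
        size_t i;

        for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
                int err = delete_entry(entries[i]);

                if (err) {
                        last_err = err;
                        continue;       /* like the new next_rtnh label */
                }
        }
        printf("last_err = %d\n", last_err);
        return 0;
}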
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 159af6c3ffb0..e43804c9387e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -648,6 +648,26 @@ struct mesh_csa_settings { struct cfg80211_csa_settings settings; };
+/** + * struct mesh_table + * + * @known_gates: list of known mesh gates and their mpaths by the station. The + * gate's mpath may or may not be resolved and active. + * @gates_lock: protects updates to known_gates + * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr + * @walk_head: linked list containing all mesh_path objects + * @walk_lock: lock protecting walk_head + * @entries: number of entries in the table + */ +struct mesh_table { + struct hlist_head known_gates; + spinlock_t gates_lock; + struct rhashtable rhead; + struct hlist_head walk_head; + spinlock_t walk_lock; + atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ +}; + struct ieee80211_if_mesh { struct timer_list housekeeping_timer; struct timer_list mesh_path_timer; @@ -722,8 +742,8 @@ struct ieee80211_if_mesh { /* offset from skb->data while building IE */ int meshconf_offset;
- struct mesh_table *mesh_paths; - struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ + struct mesh_table mesh_paths; + struct mesh_table mpp_paths; /* Store paths for MPP&MAP */ int mesh_paths_generation; int mpp_paths_generation; }; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 77080b4f87b8..b2b717a78114 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -127,26 +127,6 @@ struct mesh_path { u32 path_change_count; };
-/** - * struct mesh_table - * - * @known_gates: list of known mesh gates and their mpaths by the station. The - * gate's mpath may or may not be resolved and active. - * @gates_lock: protects updates to known_gates - * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr - * @walk_head: linked list containing all mesh_path objects - * @walk_lock: lock protecting walk_head - * @entries: number of entries in the table - */ -struct mesh_table { - struct hlist_head known_gates; - spinlock_t gates_lock; - struct rhashtable rhead; - struct hlist_head walk_head; - spinlock_t walk_lock; - atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ -}; - /* Recent multicast cache */ /* RMC_BUCKETS must be a power of 2, maximum 256 */ #define RMC_BUCKETS 256 @@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); void mesh_path_flush_pending(struct mesh_path *mpath); void mesh_path_tx_pending(struct mesh_path *mpath); -int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); +void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata); int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); void mesh_path_timer(struct timer_list *t); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 7cab1cf09bf1..acc1c299f1ae 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr) mesh_path_free_rcu(tbl, mpath); }
-static struct mesh_table *mesh_table_alloc(void) +static void mesh_table_init(struct mesh_table *tbl) { - struct mesh_table *newtbl; + INIT_HLIST_HEAD(&tbl->known_gates); + INIT_HLIST_HEAD(&tbl->walk_head); + atomic_set(&tbl->entries, 0); + spin_lock_init(&tbl->gates_lock); + spin_lock_init(&tbl->walk_lock);
- newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); - if (!newtbl) - return NULL; - - INIT_HLIST_HEAD(&newtbl->known_gates); - INIT_HLIST_HEAD(&newtbl->walk_head); - atomic_set(&newtbl->entries, 0); - spin_lock_init(&newtbl->gates_lock); - spin_lock_init(&newtbl->walk_lock); - if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) { - kfree(newtbl); - return NULL; - } - - return newtbl; + /* rhashtable_init() may fail only in case of wrong + * mesh_rht_params + */ + WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params)); }
static void mesh_table_free(struct mesh_table *tbl) { rhashtable_free_and_destroy(&tbl->rhead, mesh_path_rht_free, tbl); - kfree(tbl); }
/** @@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct mesh_path * mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata); + return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata); }
struct mesh_path * mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata); + return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata); }
static struct mesh_path * @@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx); + return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx); }
/** @@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) struct mesh_path * mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx); + return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx); }
/** @@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath) int err;
rcu_read_lock(); - tbl = mpath->sdata->u.mesh.mesh_paths; + tbl = &mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock); if (mpath->is_gate) { @@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, if (!new_mpath) return ERR_PTR(-ENOMEM);
- tbl = sdata->u.mesh.mesh_paths; + tbl = &sdata->u.mesh.mesh_paths; spin_lock_bh(&tbl->walk_lock); mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead, &new_mpath->rhash, @@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, return -ENOMEM;
memcpy(new_mpath->mpp, mpp, ETH_ALEN); - tbl = sdata->u.mesh.mpp_paths; + tbl = &sdata->u.mesh.mpp_paths;
spin_lock_bh(&tbl->walk_lock); ret = rhashtable_lookup_insert_fast(&tbl->rhead, @@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, void mesh_plink_broken(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - struct mesh_table *tbl = sdata->u.mesh.mesh_paths; + struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath;
@@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) void mesh_path_flush_by_nexthop(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - struct mesh_table *tbl = sdata->u.mesh.mesh_paths; + struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; struct mesh_path *mpath; struct hlist_node *n;
@@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, const u8 *proxy) { - struct mesh_table *tbl = sdata->u.mesh.mpp_paths; + struct mesh_table *tbl = &sdata->u.mesh.mpp_paths; struct mesh_path *mpath; struct hlist_node *n;
@@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl) */ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) { - table_flush_by_iface(sdata->u.mesh.mesh_paths); - table_flush_by_iface(sdata->u.mesh.mpp_paths); + table_flush_by_iface(&sdata->u.mesh.mesh_paths); + table_flush_by_iface(&sdata->u.mesh.mpp_paths); }
/** @@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) /* flush relevant mpp entries first */ mpp_flush_by_proxy(sdata, addr);
- err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); + err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr); sdata->u.mesh.mesh_paths_generation++; return err; } @@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) struct mesh_path *gate; bool copy = false;
- tbl = sdata->u.mesh.mesh_paths; + tbl = &sdata->u.mesh.mesh_paths;
rcu_read_lock(); hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { @@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) mesh_path_tx_pending(mpath); }
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) +void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) { - struct mesh_table *tbl_path, *tbl_mpp; - int ret; - - tbl_path = mesh_table_alloc(); - if (!tbl_path) - return -ENOMEM; - - tbl_mpp = mesh_table_alloc(); - if (!tbl_mpp) { - ret = -ENOMEM; - goto free_path; - } - - sdata->u.mesh.mesh_paths = tbl_path; - sdata->u.mesh.mpp_paths = tbl_mpp; - - return 0; - -free_path: - mesh_table_free(tbl_path); - return ret; + mesh_table_init(&sdata->u.mesh.mesh_paths); + mesh_table_init(&sdata->u.mesh.mpp_paths); }
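The mesh path table rework above replaces separately allocated mesh_table objects with tables embedded in ieee80211_if_mesh, so mesh_pathtbl_init() can no longer fail with -ENOMEM and teardown needs no kfree(). A sketch of the embed-and-init-in-place idea (illustrative types only, not the mac80211 structures):

#include <stdio.h>

struct mesh_like_table {
        int entries;
};

struct iface {
        struct mesh_like_table paths;   /* embedded, not a pointer */
};

static void table_init(struct mesh_like_table *t)
{
        t->entries = 0;                 /* nothing here can fail */
}

int main(void)
{
        struct iface sdata;

        table_init(&sdata.paths);       /* was: alloc + NULL check + error path */
        printf("entries = %d\n", sdata.paths.entries);
        return 0;
}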
static @@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
void mesh_path_expire(struct ieee80211_sub_if_data *sdata) { - mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths); - mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths); + mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths); + mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths); }
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) { - mesh_table_free(sdata->u.mesh.mesh_paths); - mesh_table_free(sdata->u.mesh.mpp_paths); + mesh_table_free(&sdata->u.mesh.mesh_paths); + mesh_table_free(&sdata->u.mesh.mpp_paths); } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index dd42d83dbe33..89c648b035b9 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -5216,7 +5216,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, */ if (new_sta) { u32 rates = 0, basic_rates = 0; - bool have_higher_than_11mbit; + bool have_higher_than_11mbit = false; int min_rate = INT_MAX, min_rate_index = -1; const struct cfg80211_bss_ies *ies; int shift = ieee80211_vif_get_shift(&sdata->vif); diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 6d16e1ab1a8a..eef0e3f2f25b 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(unsigned int)) return -EINVAL;
- if (copy_from_sockptr(&opt, optval, sizeof(unsigned int))) + if (copy_from_sockptr(&opt, optval, sizeof(unsigned long))) return -EFAULT;
switch (optname) { diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 72018e5e4d8e..65d463ad8770 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
err = pep_accept_conn(newsk, skb); if (err) { + __sock_put(sk); sock_put(newsk); newsk = NULL; goto drop; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 58a9d42b52b8..aea435b0aeb3 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1422,10 +1422,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt, if (err < 0) return err;
- if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES) - max_classes = QFQ_MAX_AGG_CLASSES; - else - max_classes = qdisc_dev(sch)->tx_queue_len + 1; + max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1, + QFQ_MAX_AGG_CLASSES); /* max_cl_shift = floor(log_2(max_classes)) */ max_cl_shift = __fls(max_classes); q->max_agg_classes = 1<<max_cl_shift; diff --git a/net/sctp/diag.c b/net/sctp/diag.c index a7d623171501..034e2c74497d 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -245,48 +245,44 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc) + 64; }
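The sch_qfq change above computes max_classes with min_t(u64, ...) so tx_queue_len + 1 is widened before the comparison; with a 32-bit unsigned length of UINT_MAX the old addition wrapped to 0. A short sketch of the difference (the QFQ_MAX_AGG_CLASSES value here is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define QFQ_MAX_AGG_CLASSES 8           /* illustrative value */

int main(void)
{
        unsigned int tx_queue_len = UINT_MAX;

        unsigned int wrapped = tx_queue_len + 1;            /* wraps to 0 */
        uint64_t widened = (uint64_t)tx_queue_len + 1;      /* keeps the real value */

        unsigned int max_classes =
                widened < QFQ_MAX_AGG_CLASSES ? (unsigned int)widened
                                              : QFQ_MAX_AGG_CLASSES;

        printf("wrapped=%u widened=%llu max_classes=%u\n",
               wrapped, (unsigned long long)widened, max_classes);
        return 0;
}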
-static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p) +static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p) { struct sctp_association *assoc = tsp->asoc; - struct sock *sk = tsp->asoc->base.sk; struct sctp_comm_param *commp = p; - struct sk_buff *in_skb = commp->skb; + struct sock *sk = ep->base.sk; const struct inet_diag_req_v2 *req = commp->r; - const struct nlmsghdr *nlh = commp->nlh; - struct net *net = sock_net(in_skb->sk); + struct sk_buff *skb = commp->skb; struct sk_buff *rep; int err;
err = sock_diag_check_cookie(sk, req->id.idiag_cookie); if (err) - goto out; + return err;
- err = -ENOMEM; rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL); if (!rep) - goto out; + return -ENOMEM;
lock_sock(sk); - if (sk != assoc->base.sk) { - release_sock(sk); - sk = assoc->base.sk; - lock_sock(sk); + if (ep != assoc->ep) { + err = -EAGAIN; + goto out; } - err = inet_sctp_diag_fill(sk, assoc, rep, req, - sk_user_ns(NETLINK_CB(in_skb).sk), - NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0, nlh, - commp->net_admin); - release_sock(sk); + + err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk), + NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0, + commp->nlh, commp->net_admin); if (err < 0) { WARN_ON(err == -EMSGSIZE); - kfree_skb(rep); goto out; } + release_sock(sk);
- err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); + return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);
out: + release_sock(sk); + kfree_skb(rep); return err; }
@@ -429,15 +425,15 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, static int sctp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { - struct sk_buff *in_skb = cb->skb; - struct net *net = sock_net(in_skb->sk); + struct sk_buff *skb = cb->skb; + struct net *net = sock_net(skb->sk); const struct nlmsghdr *nlh = cb->nlh; union sctp_addr laddr, paddr; struct sctp_comm_param commp = { - .skb = in_skb, + .skb = skb, .r = req, .nlh = nlh, - .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN), + .net_admin = netlink_net_capable(skb, CAP_NET_ADMIN), };
if (req->sdiag_family == AF_INET) { @@ -460,7 +456,7 @@ static int sctp_diag_dump_one(struct netlink_callback *cb, paddr.v6.sin6_family = AF_INET6; }
- return sctp_transport_lookup_process(sctp_tsp_dump_one, + return sctp_transport_lookup_process(sctp_sock_dump_one, net, &laddr, &paddr, &commp); }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d2215d24634e..6b3c32264cbc 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5317,23 +5317,31 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), } EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), - struct net *net, +int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr, void *p) { struct sctp_transport *transport; - int err; + struct sctp_endpoint *ep; + int err = -ENOENT;
rcu_read_lock(); transport = sctp_addrs_lookup_transport(net, laddr, paddr); + if (!transport) { + rcu_read_unlock(); + return err; + } + ep = transport->asoc->ep; + if (!sctp_endpoint_hold(ep)) { /* asoc can be peeled off */ + sctp_transport_put(transport); + rcu_read_unlock(); + return err; + } rcu_read_unlock(); - if (!transport) - return -ENOENT;
- err = cb(transport, p); + err = cb(ep, transport, p); + sctp_endpoint_put(ep); sctp_transport_put(transport); - return err; } EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ad570c2450be..3e63c83e641c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1461,6 +1461,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) msg_set_syn(hdr, 1); }
+ memset(&skaddr, 0, sizeof(skaddr)); + /* Determine destination */ if (atype == TIPC_SERVICE_RANGE) { return tipc_sendmcast(sock, ua, m, dlen, timeout); diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c index 5b9a09957c6e..89e6bf27cd9f 100644 --- a/samples/ftrace/ftrace-direct-modify.c +++ b/samples/ftrace/ftrace-direct-modify.c @@ -3,6 +3,9 @@ #include <linux/kthread.h> #include <linux/ftrace.h>
+extern void my_direct_func1(void); +extern void my_direct_func2(void); + void my_direct_func1(void) { trace_printk("my direct func1\n"); diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c index 3f0079c9bd6f..11b99325f3db 100644 --- a/samples/ftrace/ftrace-direct-too.c +++ b/samples/ftrace/ftrace-direct-too.c @@ -4,6 +4,9 @@ #include <linux/mm.h> /* for handle_mm_fault() */ #include <linux/ftrace.h>
+extern void my_direct_func(struct vm_area_struct *vma, + unsigned long address, unsigned int flags); + void my_direct_func(struct vm_area_struct *vma, unsigned long address, unsigned int flags) { diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c index a2729d1ef17f..642c50b5f716 100644 --- a/samples/ftrace/ftrace-direct.c +++ b/samples/ftrace/ftrace-direct.c @@ -4,6 +4,8 @@ #include <linux/sched.h> /* for wake_up_process() */ #include <linux/ftrace.h>
+extern void my_direct_func(struct task_struct *p); + void my_direct_func(struct task_struct *p) { trace_printk("waking up %s-%d\n", p->comm, p->pid); diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh index 3ea73013d956..6f05e06f6761 100755 --- a/tools/testing/selftests/net/udpgro_fwd.sh +++ b/tools/testing/selftests/net/udpgro_fwd.sh @@ -193,7 +193,8 @@ for family in 4 6; do SUFFIX="64 nodad" VXDEV=vxlan6 IPT=ip6tables - PING="ping6" + # Use ping6 on systems where ping doesn't handle IPv6 + ping -w 1 -c 1 ::1 > /dev/null 2>&1 || PING="ping6" fi
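The samples/ftrace changes above add extern prototypes for the my_direct_func* definitions; a function with external linkage and no preceding declaration trips -Wmissing-prototypes. A minimal standalone example of the pattern (compile with gcc -Wmissing-prototypes):

#include <stdio.h>

extern void my_direct_func(int arg);    /* the added prototype */

void my_direct_func(int arg)
{
        printf("called with %d\n", arg);
}

int main(void)
{
        my_direct_func(1);
        return 0;
}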
echo "IPv$family" diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 60aa1a4fc69b..81690f1737c8 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -86,7 +86,7 @@ static bool test_uffdio_minor = false;
static bool map_shared; static int shm_fd; -static int huge_fd; +static int huge_fd = -1; /* only used for hugetlb_shared test */ static char *huge_fd_off0; static unsigned long long *count_verify; static int uffd = -1; @@ -222,6 +222,9 @@ static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
static void hugetlb_release_pages(char *rel_area) { + if (huge_fd == -1) + return; + if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, rel_area == huge_fd_off0 ? 0 : nr_pages * page_size, nr_pages * page_size)) @@ -234,16 +237,17 @@ static void hugetlb_allocate_area(void **alloc_area) char **alloc_area_alias;
*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, - (map_shared ? MAP_SHARED : MAP_PRIVATE) | - MAP_HUGETLB, - huge_fd, *alloc_area == area_src ? 0 : - nr_pages * page_size); + map_shared ? MAP_SHARED : + MAP_PRIVATE | MAP_HUGETLB | + (*alloc_area == area_src ? 0 : MAP_NORESERVE), + huge_fd, + *alloc_area == area_src ? 0 : nr_pages * page_size); if (*alloc_area == MAP_FAILED) err("mmap of hugetlbfs file failed");
if (map_shared) { area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_HUGETLB, + MAP_SHARED, huge_fd, *alloc_area == area_src ? 0 : nr_pages * page_size); if (area_alias == MAP_FAILED) diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c index 65c141ebfbbd..5b45e6986aea 100644 --- a/tools/testing/selftests/x86/test_vsyscall.c +++ b/tools/testing/selftests/x86/test_vsyscall.c @@ -497,7 +497,7 @@ static int test_process_vm_readv(void) }
if (vsyscall_map_r) { - if (!memcmp(buf, (const void *)0xffffffffff600000, 4096)) { + if (!memcmp(buf, remote.iov_base, sizeof(buf))) { printf("[OK]\tIt worked and read correct data\n"); } else { printf("[FAIL]\tIt worked but returned incorrect data\n");