diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 426162009ce9..0e486f41185e 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -1099,7 +1099,7 @@ task_delayacct
 ===============
 
 Enables/disables task delay accounting (see
-:doc:`accounting/delay-accounting.rst`). Enabling this feature incurs
+Documentation/accounting/delay-accounting.rst. Enabling this feature incurs
 a small amount of overhead in the scheduler but is useful for debugging
 and performance tuning. It is required by some tools such as iotop.
diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst
index 2afccc63856e..1cfbf1add2fc 100644
--- a/Documentation/networking/ipvs-sysctl.rst
+++ b/Documentation/networking/ipvs-sysctl.rst
@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER
 	0: disable any special handling on port reuse. The new
 	   connection will be delivered to the same real server that was
-	   servicing the previous connection. This will effectively
-	   disable expire_nodest_conn.
+	   servicing the previous connection.
 
 	bit 1: enable rescheduling of new connections when it is safe.
 	       That is, whenever expire_nodest_conn and for TCP sockets, when
diff --git a/Makefile b/Makefile
index 820ccbe7586f..0faa64733281 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 5
+SUBLEVEL = 6
 EXTRAVERSION =
 NAME = Trick or Treat
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 3b60297af7f6..9e01dbca4a01 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -506,11 +506,17 @@ pcie0: pcie@7d500000 {
 			#address-cells = <3>;
 			#interrupt-cells = <1>;
 			#size-cells = <2>;
-			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+			interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "pcie", "msi";
 			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
 			interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
+						IRQ_TYPE_LEVEL_HIGH>,
+					<0 0 0 2 &gicv2 GIC_SPI 144
+						IRQ_TYPE_LEVEL_HIGH>,
+					<0 0 0 3 &gicv2 GIC_SPI 145
+						IRQ_TYPE_LEVEL_HIGH>,
+					<0 0 0 4 &gicv2 GIC_SPI 146
 						IRQ_TYPE_LEVEL_HIGH>;
 			msi-controller;
 			msi-parent = <&pcie0>;
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index d4f355015e3c..f69d2af3c1fa 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -242,6 +242,8 @@ chipcommon: chipcommon@0 {
 			gpio-controller;
 			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 
 		pcie0: pcie@12000 {
@@ -408,7 +410,7 @@ uart2: serial@18008000 {
 		i2c0: i2c@18009000 {
 			compatible = "brcm,iproc-i2c";
 			reg = <0x18009000 0x50>;
-			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clock-frequency = <100000>;
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index fc2608b18a0d..18f01190dcfd 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
 u32 socfpga_sdram_self_refresh(u32 sdr_base);
 extern unsigned int socfpga_sdram_self_refresh_sz;
 
-extern char secondary_trampoline, secondary_trampoline_end;
+extern char secondary_trampoline[], secondary_trampoline_end[];
 
 extern unsigned long socfpga_cpu1start_addr;
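The core.h change above is subtle enough to deserve a standalone note: with the scalar declarations, computing the trampoline size subtracts pointers to two distinct one-byte objects, which is undefined behaviour that newer GCC releases happily fold to a constant. A minimal sketch of the working pattern (symbol names here are illustrative, provided by a linker script):

```c
extern char trampoline_start[];	/* incomplete arrays: only the address */
extern char trampoline_end[];	/* of each linker symbol is meaningful */

static inline long trampoline_size(void)
{
	/* The arrays decay to pointers, so plain subtraction is fine;
	 * "&end - &start" on scalar chars would be undefined. */
	return trampoline_end - trampoline_start;
}
```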
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index fbb80b883e5d..201191cf68f3 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -20,14 +20,14 @@
 
 static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+	int trampoline_size = secondary_trampoline_end - secondary_trampoline;
 
 	if (socfpga_cpu1start_addr) {
 		/* This will put CPU #1 into reset. */
 		writel(RSTMGR_MPUMODRST_CPU1,
 		       rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
 
-		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
 
 		writel(__pa_symbol(secondary_startup),
 		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+	int trampoline_size = secondary_trampoline_end - secondary_trampoline;
 
 	if (socfpga_cpu1start_addr) {
 		writel(RSTMGR_MPUMODRST_CPU1,
 		       rst_manager_base_addr + SOCFPGA_A10_RSTMGR_MODMPURST);
-		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
 
 		writel(__pa_symbol(secondary_startup),
 		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 8433a2058eb1..237224484d0f 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
 				       pte_t *ptep)
 {
-	VM_BUG_ON(mm != &init_mm);
+	VM_BUG_ON(mm && mm != &init_mm);
 	__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
 }
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 190b494e22ab..0fd6056ba412 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -292,12 +292,22 @@ do { \
 	(x) = (__force __typeof__(*(ptr)))__gu_val; \
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_get_user(x, ptr, err) \
 do { \
+	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
+	__typeof__(x) __rgu_val; \
 	__chk_user_ptr(ptr); \
+	\
 	uaccess_ttbr0_enable(); \
-	__raw_get_mem("ldtr", x, ptr, err); \
+	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
 	uaccess_ttbr0_disable(); \
+	\
+	(x) = __rgu_val; \
 } while (0)
 
 #define __get_user_error(x, ptr, err) \
@@ -321,14 +331,22 @@ do { \
 
 #define get_user	__get_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __get_kernel_nofault(dst, src, type, err_label) \
 do { \
+	__typeof__(dst) __gkn_dst = (dst); \
+	__typeof__(src) __gkn_src = (src); \
 	int __gkn_err = 0; \
 	\
 	__uaccess_enable_tco_async(); \
-	__raw_get_mem("ldr", *((type *)(dst)), \
-		      (__force type *)(src), __gkn_err); \
+	__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
+		      (__force type *)(__gkn_src), __gkn_err); \
 	__uaccess_disable_tco_async(); \
+	\
 	if (unlikely(__gkn_err)) \
 		goto err_label; \
 } while (0)
@@ -367,11 +385,19 @@ do { \
 	} \
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_put_user(x, ptr, err) \
 do { \
-	__chk_user_ptr(ptr); \
+	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
+	__typeof__(*(ptr)) __rpu_val = (x); \
+	__chk_user_ptr(__rpu_ptr); \
+	\
 	uaccess_ttbr0_enable(); \
-	__raw_put_mem("sttr", x, ptr, err); \
+	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
 	uaccess_ttbr0_disable(); \
 } while (0)
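The common thread in these uaccess hunks: the macro arguments are captured into locals before uaccess_ttbr0_enable(), so any side effects in the caller's expressions (including ones that sleep) run outside the window in which the user TTBR is live. A hedged sketch of the hazard being closed, with lookup_user_ptr() as a hypothetical helper that may take a mutex:

```c
unsigned long val;
int err = 0;

/*
 * Pre-fix, `ptr` was expanded inside the critical section, roughly:
 *
 *	uaccess_ttbr0_enable();
 *	__raw_get_mem("ldtr", val, lookup_user_ptr(), err);  // may sleep!
 *	uaccess_ttbr0_disable();
 *
 * Post-fix, lookup_user_ptr() is evaluated into __rgu_ptr first, and
 * only the memory access itself sits between enable() and disable().
 */
__raw_get_user(val, lookup_user_ptr(), err);
```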
@@ -396,14 +422,22 @@ do { \
 
 #define put_user	__put_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __put_kernel_nofault(dst, src, type, err_label) \
 do { \
+	__typeof__(dst) __pkn_dst = (dst); \
+	__typeof__(src) __pkn_src = (src); \
 	int __pkn_err = 0; \
 	\
 	__uaccess_enable_tco_async(); \
-	__raw_put_mem("str", *((type *)(src)), \
-		      (__force type *)(dst), __pkn_err); \
+	__raw_put_mem("str", *((type *)(__pkn_src)), \
+		      (__force type *)(__pkn_dst), __pkn_err); \
 	__uaccess_disable_tco_async(); \
+	\
 	if (unlikely(__pkn_err)) \
 		goto err_label; \
 } while(0)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a917d408d27d..23654ccdbfb1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3189,7 +3189,7 @@ config STACKTRACE_SUPPORT
 config PGTABLE_LEVELS
 	int
 	default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
-	default 3 if 64BIT && !PAGE_SIZE_64KB
+	default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
 	default 2
 
 config MIPS_AUTO_PFN_OFFSET
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 630fcb4cb30e..7c861e6a8952 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
 
 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 {
-	decode_configs(c);
-
 	/* All Loongson processors covered here define ExcCode 16 as GSExc. */
 	c->options |= MIPS_CPU_GSEXCEX;
 
@@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		panic("Unknown Loongson Processor ID!");
 		break;
 	}
+
+	decode_configs(c);
 }
 #else
 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 3d208afd15bc..2769eb991f58 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -57,8 +57,6 @@ SECTIONS
 {
 	. = KERNEL_BINARY_TEXT_START;
 
-	_stext = .;	/* start of kernel text, includes init code & data */
-
 	__init_begin = .;
 	HEAD_TEXT_SECTION
 	MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
@@ -82,6 +80,7 @@ SECTIONS
 	/* freed after init ends here */
 
 	_text = .;	/* Text and read-only data */
+	_stext = .;
 	MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
 	.text ALIGN(PAGE_SIZE) : {
 		TEXT_TEXT
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 6b1ec9e3541b..349c4a820231 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -202,11 +202,11 @@ _ASM_NOKPROBE_SYMBOL(\name()_virt)
 	mfspr	r1, SPRN_SPRG_THREAD
 	lwz	r1, TASK_CPU - THREAD(r1)
 	slwi	r1, r1, 3
-	addis	r1, r1, emergency_ctx@ha
+	addis	r1, r1, emergency_ctx-PAGE_OFFSET@ha
 #else
-	lis	r1, emergency_ctx@ha
+	lis	r1, emergency_ctx-PAGE_OFFSET@ha
 #endif
-	lwz	r1, emergency_ctx@l(r1)
+	lwz	r1, emergency_ctx-PAGE_OFFSET@l(r1)
 	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
 	EXCEPTION_PROLOG_2 0 vmap_stack_overflow
 	prepare_transfer_to_handler
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fcf4760a3a0e..70b7a8f97153 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
 				     "r" (0) : "memory");
 		}
 		asm volatile("ptesync": : :"memory");
+		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
 		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
 	} else {
 		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
@@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
 			rb += PPC_BIT(51);	/* increment set number */
 		}
 		asm volatile("ptesync": : :"memory");
-		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
 	}
 }
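For context on the emergency_ctx relocations above: the vmap-stack-overflow prolog can run with address translation disabled, so the address it loads through must be physical. On these 32-bit configurations the kernel's linear map is a constant offset, which is all the "-PAGE_OFFSET" adjustment in the @ha/@l halves relies on; in C the same rebasing is just a sketch like:

```c
/* Sketch only: rebase a linear-map virtual address to its physical
 * alias so it can be dereferenced while the MMU is off. */
static inline unsigned long virt_to_real(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;	/* linear map: phys = virt - PAGE_OFFSET */
}
```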
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
index b254c60589a1..cce5eca31f25 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
@@ -12,7 +12,7 @@ / {
 	#address-cells = <2>;
 	#size-cells = <2>;
 	model = "Microchip PolarFire-SoC Icicle Kit";
-	compatible = "microchip,mpfs-icicle-kit";
+	compatible = "microchip,mpfs-icicle-kit", "microchip,mpfs";
 
 	aliases {
 		ethernet0 = &emac1;
@@ -56,8 +56,17 @@ &serial3 {
 	status = "okay";
 };
 
-&sdcard {
+&mmc {
 	status = "okay";
+
+	bus-width = <4>;
+	disable-wp;
+	cap-sd-highspeed;
+	card-detect-delay = <200>;
+	sd-uhs-sdr12;
+	sd-uhs-sdr25;
+	sd-uhs-sdr50;
+	sd-uhs-sdr104;
 };
 
 &emac0 {
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
index 9d2fbbc1f777..b12fd594e717 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
@@ -6,8 +6,8 @@ / {
 	#address-cells = <2>;
 	#size-cells = <2>;
-	model = "Microchip MPFS Icicle Kit";
-	compatible = "microchip,mpfs-icicle-kit";
+	model = "Microchip PolarFire SoC";
+	compatible = "microchip,mpfs";
 
 	chosen {
 	};
 
@@ -262,39 +262,14 @@ serial3: serial@20104000 {
 			status = "disabled";
 		};
 
-		emmc: mmc@20008000 {
+		/* Common node entry for emmc/sd */
+		mmc: mmc@20008000 {
 			compatible = "cdns,sd4hc";
 			reg = <0x0 0x20008000 0x0 0x1000>;
 			interrupt-parent = <&plic>;
 			interrupts = <88 89>;
 			pinctrl-names = "default";
 			clocks = <&clkcfg 6>;
-			bus-width = <4>;
-			cap-mmc-highspeed;
-			mmc-ddr-3_3v;
-			max-frequency = <200000000>;
-			non-removable;
-			no-sd;
-			no-sdio;
-			voltage-ranges = <3300 3300>;
-			status = "disabled";
-		};
-
-		sdcard: sdhc@20008000 {
-			compatible = "cdns,sd4hc";
-			reg = <0x0 0x20008000 0x0 0x1000>;
-			interrupt-parent = <&plic>;
-			interrupts = <88>;
-			pinctrl-names = "default";
-			clocks = <&clkcfg 6>;
-			bus-width = <4>;
-			disable-wp;
-			cap-sd-highspeed;
-			card-detect-delay = <200>;
-			sd-uhs-sdr12;
-			sd-uhs-sdr25;
-			sd-uhs-sdr50;
-			sd-uhs-sdr104;
 			max-frequency = <200000000>;
 			status = "disabled";
 		};
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index ff4b52e37e60..5adab895127e 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -62,4 +62,9 @@ void xen_arch_register_cpu(int num);
 void xen_arch_unregister_cpu(int num);
 #endif
+#ifdef CONFIG_PVH
+void __init xen_pvh_init(struct boot_params *boot_params);
+void __init mem_map_via_hcall(struct boot_params *boot_params_p);
+#endif
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/block/blk-core.c b/block/blk-core.c
index 12aa8c1da600..c2d912d0c976 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -389,8 +389,10 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
 	blk_sync_queue(q);
-	if (queue_is_mq(q))
+	if (queue_is_mq(q)) {
+		blk_mq_cancel_work_sync(q);
 		blk_mq_exit_queue(q);
+	}
 
 	/*
 	 * In theory, request pool of sched_tags belongs to request queue.
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c8a9d10f7c18..82de39926a9f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4018,6 +4018,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_rq_cpu);
 
+void blk_mq_cancel_work_sync(struct request_queue *q)
+{
+	if (queue_is_mq(q)) {
+		struct blk_mq_hw_ctx *hctx;
+		int i;
+
+		cancel_delayed_work_sync(&q->requeue_work);
+
+		queue_for_each_hw_ctx(q, hctx, i)
+			cancel_delayed_work_sync(&hctx->run_work);
+	}
+}
+
 static int __init blk_mq_init(void)
 {
 	int i;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d08779f77a26..7cdca23b6263 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -129,6 +129,8 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
 
+void blk_mq_cancel_work_sync(struct request_queue *q);
+
 void blk_mq_release(struct request_queue *q);
 
 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 614d9d47de36..4737ec024ee9 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -805,16 +805,6 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_free_queue_stats(q->stats);
 
-	if (queue_is_mq(q)) {
-		struct blk_mq_hw_ctx *hctx;
-		int i;
-
-		cancel_delayed_work_sync(&q->requeue_work);
-
-		queue_for_each_hw_ctx(q, hctx, i)
-			cancel_delayed_work_sync(&hctx->run_work);
-	}
-
 	blk_exit_queue(q);
 
 	blk_queue_free_zone_bitmaps(q);
diff --git a/block/elevator.c b/block/elevator.c
index ff45d8388f48..cd02ae332c4e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -694,12 +694,18 @@ void elevator_init_mq(struct request_queue *q)
 	if (!e)
 		return;
 
+	/*
+	 * We are called before adding disk, when there isn't any FS I/O,
+	 * so freezing queue plus canceling dispatch work is enough to
+	 * drain any dispatch activities originated from passthrough
+	 * requests, then no need to quiesce queue which may add long boot
+	 * latency, especially when lots of disks are involved.
+	 */
 	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
+	blk_mq_cancel_work_sync(q);
 
 	err = blk_mq_init_sched(q, e);
 
-	blk_mq_unquiesce_queue(q);
 	blk_mq_unfreeze_queue(q);
 
 	if (err) {
diff --git a/block/genhd.c b/block/genhd.c
index 6accd0b185e9..f091a60dcf1e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1086,6 +1086,8 @@ static void disk_release(struct device *dev)
 	might_sleep();
 	WARN_ON_ONCE(disk_live(disk));
 
+	blk_mq_cancel_work_sync(disk->queue);
+
 	disk_release_events(disk);
 	kfree(disk->random);
 	xa_destroy(&disk->part_tbl);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index bd482108310c..3fbb17ecce2d 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1011,7 +1011,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
 {
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
-	struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
+	struct cpc_register_resource *reg;
+
+	if (!cpc_desc) {
+		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+		return -ENODEV;
+	}
+
+	reg = &cpc_desc->cpc_regs[reg_idx];
 
 	if (CPC_IN_PCC(reg)) {
 		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e312ebaed8db..781e312f4534 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1090,15 +1090,10 @@ struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
 		/* All data nodes have parent pointer so just return that */
 		return to_acpi_data_node(fwnode)->parent;
 	} else if (is_acpi_device_node(fwnode)) {
-		acpi_handle handle, parent_handle;
+		struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
 
-		handle = to_acpi_device_node(fwnode)->handle;
-		if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) {
-			struct acpi_device *adev;
-
-			if (!acpi_bus_get_device(parent_handle, &adev))
-				return acpi_fwnode_handle(adev);
-		}
+		if (dev)
+			return acpi_fwnode_handle(to_acpi_device(dev));
 	}
 
 	return NULL;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 49fb74196d02..cffbe57a8e08 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
 		t->from = thread;
 	else
 		t->from = NULL;
-	t->sender_euid = proc->cred->euid;
+	t->sender_euid = task_euid(proc->tsk);
 	t->to_proc = target_proc;
 	t->to_thread = target_thread;
 	t->code = tr->code;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dafa631582ba..e15c3bc17a55 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -999,6 +999,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
 		 */
 		value &= ~GENMASK_ULL(31, 24);
 		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+		/*
+		 * However, make sure that EPP will be set to "performance" when
+		 * the CPU is brought back online again and the "performance"
+		 * scaling algorithm is still in effect.
+		 */
+		cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
 	}
 
 	/*
@@ -2249,6 +2255,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	X86_MATCH(BROADWELL_D, core_funcs),
 	X86_MATCH(BROADWELL_X, core_funcs),
 	X86_MATCH(SKYLAKE_X, core_funcs),
+	X86_MATCH(ICELAKE_X, core_funcs),
 	{}
 };
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index de416f9e7921..f5219334fd3a 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes {
 	__le16 reserved;
 };
 
+struct scmi_msg_resp_base_discover_agent {
+	__le32 agent_id;
+	u8 name[SCMI_MAX_STR_SIZE];
+};
+
+
 struct scmi_msg_base_error_notify {
 	__le32 event_control;
 #define BASE_TP_NOTIFY_ALL	BIT(0)
@@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
 					int id, char *name)
 {
 	int ret;
+	struct scmi_msg_resp_base_discover_agent *agent_info;
 	struct scmi_xfer *t;
 
 	ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
-				      sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
+				      sizeof(__le32), sizeof(*agent_info), &t);
 	if (ret)
 		return ret;
 
 	put_unaligned_le32(id, t->tx.buf);
 
 	ret = ph->xops->do_xfer(ph, t);
-	if (!ret)
-		strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+	if (!ret) {
+		agent_info = t->rx.buf;
+		strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
+	}
 
 	ph->xops->xfer_put(ph, t);
 
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 4371fdcd5a73..581d34c95769 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
 	scmi_pd_data->domains = domains;
 	scmi_pd_data->num_domains = num_domains;
 
-	of_genpd_add_provider_onecell(np, scmi_pd_data);
-
-	return 0;
+	return of_genpd_add_provider_onecell(np, scmi_pd_data);
 }
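Restating the BASE_DISCOVER_AGENT fix from base.c above as a standalone sketch, since the bug is easy to misread: the platform's reply carries a 32-bit agent id in front of the NUL-terminated name, so the old code both undersized the rx buffer and copied the string from the wrong offset (field sizes here are illustrative):

```c
struct discover_agent_resp {
	__le32 agent_id;	/* precedes the name on the wire */
	u8 name[16];		/* SCMI_MAX_STR_SIZE in the real driver */
};

static void copy_agent_name(void *rx_buf, char *out, size_t outsz)
{
	struct discover_agent_resp *resp = rx_buf;

	/* Copying from rx_buf directly would return the id bytes as text. */
	strscpy(out, (const char *)resp->name, outsz);
}
```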
 
 static const struct scmi_device_id scmi_id_table[] = {
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 308471586381..cdbb287bd8bc 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
 	if (ret)
 		return ret;
 
-	put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
+	put_unaligned_le32(sensor_id, t->tx.buf);
 	ret = ph->xops->do_xfer(ph, t);
 	if (!ret) {
 		struct sensors_info *si = ph->get_priv(ph);
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 11e8efb71375..87039c5c03fd 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
 }
 
 static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
-			       struct scmi_vio_msg *msg)
+			       struct scmi_vio_msg *msg,
+			       struct device *dev)
 {
 	struct scatterlist sg_in;
 	int rc;
@@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
 
 	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
 	if (rc)
-		dev_err_once(vioch->cinfo->dev,
-			     "failed to add to virtqueue (%d)\n", rc);
+		dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
 	else
 		virtqueue_kick(vioch->vqueue);
 
@@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
 				  struct scmi_vio_msg *msg)
 {
 	if (vioch->is_rx) {
-		scmi_vio_feed_vq_rx(vioch, msg);
+		scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
 	} else {
 		/* Here IRQs are assumed to be already disabled by the caller */
 		spin_lock(&vioch->lock);
@@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 			list_add_tail(&msg->list, &vioch->free_list);
 			spin_unlock_irqrestore(&vioch->lock, flags);
 		} else {
-			scmi_vio_feed_vq_rx(vioch, msg);
+			scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
 		}
 	}
 
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index a5048956a0be..ac08e819088b 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
 		int cnt;
 
 		cmd->domain_id = cpu_to_le32(v->id);
-		cmd->level_index = desc_index;
+		cmd->level_index = cpu_to_le32(desc_index);
 		ret = ph->xops->do_xfer(ph, tl);
 		if (ret)
 			break;
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index 581aa5e9b077..dd7c3d5e8b0b 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -50,7 +50,7 @@ static int __init smccc_soc_init(void)
 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 			     ARM_SMCCC_ARCH_SOC_ID, &res);
 
-	if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+	if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
 		pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
 		return 0;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f3d62e196901..0c7963dfacad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
  */
 int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 {
-	unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
+	unsigned int count;
 	u32 wptr;
 
 	if (!ih->enabled || adev->shutdown)
@@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 	wptr = amdgpu_ih_get_wptr(adev, ih);
 
 restart_ih:
+	count = AMDGPU_IH_MAX_NUM_IVS;
 	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
 
 	/* Order reading of wptr vs. reading of IH ring data */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 16dbe593cba2..970d59a21005 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7729,8 +7729,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_VANGOGH:
 	case CHIP_YELLOW_CARP:
-		clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
-			((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+		preempt_disable();
+		clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
+		clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
+		hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
+		/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
+		 * roughly every 42 seconds.
+		 */
+		if (hi_check != clock_hi) {
+			clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
+			clock_hi = hi_check;
+		}
+		preempt_enable();
+		clock = clock_lo | (clock_hi << 32ULL);
 		break;
 	default:
 		preempt_disable();
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 025184a556ee..55f8dd6e56b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
 #define mmTCP_CHAN_STEER_5_ARCT		0x0b0c
 #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX	0
 
+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir		0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX	1
+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir		0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX	1
+
 enum ta_ras_gfx_subblock {
 	/*CPC*/
 	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -4228,19 +4233,38 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
 
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
-	uint64_t clock;
+	uint64_t clock, clock_lo, clock_hi, hi_check;
 
-	amdgpu_gfx_off_ctrl(adev, false);
-	mutex_lock(&adev->gfx.gpu_clock_mutex);
-	if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
-		clock = gfx_v9_0_kiq_read_clock(adev);
-	} else {
-		WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
-		clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
-			((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	switch (adev->asic_type) {
+	case CHIP_RENOIR:
+		preempt_disable();
+		clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
+		clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
+		hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
+		/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
+		 * roughly every 42 seconds.
+		 */
+		if (hi_check != clock_hi) {
+			clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
+			clock_hi = hi_check;
+		}
+		preempt_enable();
+		clock = clock_lo | (clock_hi << 32ULL);
+		break;
+	default:
+		amdgpu_gfx_off_ctrl(adev, false);
+		mutex_lock(&adev->gfx.gpu_clock_mutex);
+		if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+			clock = gfx_v9_0_kiq_read_clock(adev);
+		} else {
+			WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+			clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+				((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+		}
+		mutex_unlock(&adev->gfx.gpu_clock_mutex);
+		amdgpu_gfx_off_ctrl(adev, true);
+		break;
 	}
-	mutex_unlock(&adev->gfx.gpu_clock_mutex);
-	amdgpu_gfx_off_ctrl(adev, true);
 	return clock;
 }
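Both GPU clock hunks above use the same lock-free idiom for reading a 64-bit counter exposed as two 32-bit registers; here it is in isolation (read_hi()/read_lo() stand in for the RREG32_SOC15_NO_KIQ() accessors). At 100 MHz the low word wraps roughly every 43 seconds (2^32 / 10^8), so the high-half re-read is ordinary insurance, not a rare-event hack:

```c
static u32 read_hi(void);	/* placeholder MMIO accessors */
static u32 read_lo(void);

static u64 read_split_counter(void)
{
	u32 hi, lo, hi2;

	hi  = read_hi();
	lo  = read_lo();
	hi2 = read_hi();	/* did the low word wrap between reads? */
	if (hi2 != hi) {	/* yes: lo belongs to the old epoch */
		lo = read_lo();
		hi = hi2;
	}
	return ((u64)hi << 32) | lo;
}
```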
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 084491afe540..dc995ce52eff 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2213,6 +2213,8 @@ static int dm_resume(void *handle)
 	if (amdgpu_in_reset(adev)) {
 		dc_state = dm->cached_dc_state;
 
+		amdgpu_dm_outbox_init(adev);
+
 		r = dm_dmub_hw_init(adev);
 		if (r)
 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2224,8 +2226,8 @@ static int dm_resume(void *handle)
 
 		for (i = 0; i < dc_state->stream_count; i++) {
 			dc_state->streams[i]->mode_changed = true;
-			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
-				dc_state->stream_status->plane_states[j]->update_flags.raw
+			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+				dc_state->stream_status[i].plane_states[j]->update_flags.raw
 					= 0xffffffff;
 			}
 		}
@@ -3846,6 +3848,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
 			amdgpu_dm_update_connector_after_detect(aconnector);
 			register_backlight_device(dm, link);
+
+			if (dm->num_of_edps)
+				update_connector_ext_caps(aconnector);
 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
 				amdgpu_dm_set_psr_caps(link);
 		}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 258c573acc97..1f406f21b452 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	uint32_t min_freq, max_freq = 0;
 	uint32_t ret = 0;
 
-	phm_get_sysfs_buf(&buf, &size);
-
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		else
 			i = 1;
 
-		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
+		size += sprintf(buf + size, "0: %uMhz %s\n",
 					data->gfx_min_freq_limit/100,
 					i == 0 ? "*" : "");
-		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+		size += sprintf(buf + size, "1: %uMhz %s\n",
 					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
 					i == 1 ? "*" : "");
-		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
+		size += sprintf(buf + size, "2: %uMhz %s\n",
 					data->gfx_max_freq_limit/100,
 					i == 2 ? "*" : "");
 		break;
@@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
 
 		for (i = 0; i < mclk_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, mclk_table->entries[i].clk / 100,
 					((mclk_table->entries[i].clk / 100)
@@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 			if (ret)
 				return ret;
 
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
-			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+			size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+			size += sprintf(buf + size, "0: %10uMhz\n",
 			(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
-			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+			size += sprintf(buf + size, "1: %10uMhz\n",
 			(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
 		}
 		break;
@@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 			if (ret)
 				return ret;
 
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
-			size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+			size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
 				min_freq, max_freq);
 		}
 		break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index aceebf584225..611969bf4520 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int size = 0;
 	uint32_t i, now, clock, pcie_speed;
 
-	phm_get_sysfs_buf(&buf, &size);
-
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		now = i;
 
 		for (i = 0; i < sclk_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, sclk_table->dpm_levels[i].value / 100,
 					(i == now) ? "*" : "");
 		break;
@@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		now = i;
 
 		for (i = 0; i < mclk_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, mclk_table->dpm_levels[i].value / 100,
 					(i == now) ? "*" : "");
 		break;
@@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		now = i;
 
 		for (i = 0; i < pcie_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
+			size += sprintf(buf + size, "%d: %s %s\n", i,
 					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
 					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
 					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
@@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+			size += sprintf(buf + size, "%s:\n", "OD_SCLK");
 			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
-				size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
+				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
 					i, odn_sclk_table->entries[i].clock/100,
 					odn_sclk_table->entries[i].vddc);
 		}
 		break;
 	case OD_MCLK:
 		if (hwmgr->od_enabled) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+			size += sprintf(buf + size, "%s:\n", "OD_MCLK");
 			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
-				size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
+				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
 					i, odn_mclk_table->entries[i].clock/100,
 					odn_mclk_table->entries[i].vddc);
 		}
 		break;
 	case OD_RANGE:
 		if (hwmgr->od_enabled) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
-			size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+			size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
 				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
-			size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
+			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
 				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
 				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
-			size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
+			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
 				data->odn_dpm_table.min_vddc,
 				data->odn_dpm_table.max_vddc);
 		}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 8e28a8eecefc..03bf8f069222 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
 	uint32_t i, now;
 	int size = 0;
 
-	phm_get_sysfs_buf(&buf, &size);
-
 	switch (type) {
 	case PP_SCLK:
 		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
@@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
 				CURR_SCLK_INDEX);
 
 		for (i = 0; i < sclk_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, sclk_table->entries[i].clk / 100,
 					(i == now) ? "*" : "");
 		break;
@@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
 				CURR_MCLK_INDEX);
 
 		for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
 					(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
 		break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c981fc2882f0..e6336654c565 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 	int i, now, size = 0, count = 0;
 
-	phm_get_sysfs_buf(&buf, &size);
-
 	switch (type) {
 	case PP_SCLK:
 		if (data->registry_data.sclk_dpm_key_disabled)
@@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		else
 			count = sclk_table->count;
 		for (i = 0; i < count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, sclk_table->dpm_levels[i].value / 100,
 					(i == now) ? "*" : "");
 		break;
@@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
 
 		for (i = 0; i < mclk_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, mclk_table->dpm_levels[i].value / 100,
 					(i == now) ? "*" : "");
 		break;
@@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
 
 		for (i = 0; i < soc_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, soc_table->dpm_levels[i].value / 100,
 					(i == now) ? "*" : "");
 		break;
@@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 				PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
 
 		for (i = 0; i < dcef_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 					i, dcef_table->dpm_levels[i].value / 100,
 					(dcef_table->dpm_levels[i].value / 100 == now) ?
 					"*" : "");
@@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 			gen_speed = pptable->PcieGenSpeed[i];
 			lane_width = pptable->PcieLaneCount[i];
 
-			size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
+			size += sprintf(buf + size, "%d: %s %s %s\n", i,
 					(gen_speed == 0) ? "2.5GT/s," :
 					(gen_speed == 1) ? "5.0GT/s," :
 					(gen_speed == 2) ? "8.0GT/s," :
@@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+			size += sprintf(buf + size, "%s:\n", "OD_SCLK");
 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
 			for (i = 0; i < podn_vdd_dep->count; i++)
-				size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
+				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
 					i, podn_vdd_dep->entries[i].clk / 100,
 					podn_vdd_dep->entries[i].vddc);
 		}
 		break;
 	case OD_MCLK:
 		if (hwmgr->od_enabled) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+			size += sprintf(buf + size, "%s:\n", "OD_MCLK");
 			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
 			for (i = 0; i < podn_vdd_dep->count; i++)
-				size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
+				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
 					i, podn_vdd_dep->entries[i].clk/100,
 					podn_vdd_dep->entries[i].vddc);
 		}
 		break;
 	case OD_RANGE:
 		if (hwmgr->od_enabled) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
-			size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+			size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
 				data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
 				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
-			size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
+			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
 				data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
 				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
-			size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
+			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
 				data->odn_dpm_table.min_vddc,
 				data->odn_dpm_table.max_vddc);
 		}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index f7e783e1c888..a2f4d6773d45 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int i, now, size = 0;
 	struct pp_clock_levels_with_latency clocks;
 
-	phm_get_sysfs_buf(&buf, &size);
-
 	switch (type) {
 	case PP_SCLK:
 		PP_ASSERT_WITH_CODE(
@@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 				"Attempt to get gfx clk levels Failed!",
 				return -1);
 		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
 		break;
@@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 				"Attempt to get memory clk levels Failed!",
 				return -1);
 		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
 		break;
@@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 				"Attempt to get soc clk levels Failed!",
 				return -1);
 		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
 		break;
@@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
 				"Attempt to get dcef clk levels Failed!",
 				return -1);
 		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
 		break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 03e63be4ee27..85d55ab4e369 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	int ret = 0;
 	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
 
-	phm_get_sysfs_buf(&buf, &size);
-
 	switch (type) {
 	case PP_SCLK:
 		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 				return ret);
 
 		if (vega20_get_sclks(hwmgr, &clocks)) {
-			size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
 				now / 100);
 			break;
 		}
 
 		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
 		break;
@@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 				return ret);
 
 		if (vega20_get_memclocks(hwmgr, &clocks)) {
-			size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
 				now / 100);
 			break;
 		}
 
 		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
 		break;
@@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 				return ret);
 
 		if (vega20_get_socclocks(hwmgr, &clocks)) {
-			size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
 				now / 100);
 			break;
 		}
 
 		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
 		break;
@@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 				return ret);
 
 		for (i = 0; i < fclk_dpm_table->count; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, fclk_dpm_table->dpm_levels[i].value,
 				fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
 		break;
@@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 				return ret);
 
 		if (vega20_get_dcefclocks(hwmgr, &clocks)) {
-			size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
 				now / 100);
 			break;
 		}
 
 		for (i = 0; i < clocks.num_levels; i++)
-			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
 				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
 		break;
@@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 			gen_speed = pptable->PcieGenSpeed[i];
 			lane_width = pptable->PcieLaneCount[i];
 
-			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
+			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
 					(gen_speed == 0) ? "2.5GT/s," :
 					(gen_speed == 1) ? "5.0GT/s," :
 					(gen_speed == 2) ? "8.0GT/s," :
@@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 	case OD_SCLK:
 		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
-			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+			size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+			size += sprintf(buf + size, "0: %10uMhz\n",
 				od_table->GfxclkFmin);
-			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+			size += sprintf(buf + size, "1: %10uMhz\n",
 				od_table->GfxclkFmax);
 		}
 		break;
 
 	case OD_MCLK:
 		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
-			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+			size += sprintf(buf + size, "%s:\n", "OD_MCLK");
+			size += sprintf(buf + size, "1: %10uMhz\n",
 				od_table->UclkFmax);
 		}
 
@@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
-			size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
-			size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
+			size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
+			size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
 				od_table->GfxclkFreq1,
 				od_table->GfxclkVolt1 / VOLTAGE_SCALE);
-			size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
+			size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
 				od_table->GfxclkFreq2,
 				od_table->GfxclkVolt2 / VOLTAGE_SCALE);
-			size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
+			size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
 				od_table->GfxclkFreq3,
 				od_table->GfxclkVolt3 / VOLTAGE_SCALE);
 		}
@@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 
 	case OD_RANGE:
-		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+		size += sprintf(buf + size, "%s:\n", "OD_RANGE");
 
 		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
-			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
 				od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
 				od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
 		}
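A note on the long run of sysfs_emit_at() -> sprintf() reversions through these hwmgr files: sysfs_emit_at() warns and emits nothing unless `buf` is the page-aligned start of the sysfs buffer, while these print_clock_levels() handlers can be handed an offset pointer by their amdgpu callers (the removed phm_get_sysfs_buf() calls papered over that). A hedged sketch of the calling shape, with an illustrative function name:

```c
/* A caller that concatenates several sections into one sysfs page. */
static ssize_t show_clocks(char *buf)	/* buf: page-aligned sysfs buffer */
{
	int size = 0;

	size += sprintf(buf + size, "OD_SCLK:\n");
	/*
	 * A helper handed "buf + size" no longer sees a page-aligned
	 * pointer, so sysfs_emit_at(buf + size, ...) would WARN and
	 * return 0 -- which is how empty pp_dpm_* files would manifest.
	 */
	size += sprintf(buf + size, "0: %uMhz\n", 300u);
	return size;
}
```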
 
 		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
-			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
+			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
 				od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
 				od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
 		}
@@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
 		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
-			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
 				od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
 				od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
-			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
-			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
 				od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
 				od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
-			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
-			size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
 				od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
 				od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
-			size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
 				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
 		}
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index b53fee6f1c17..65f172807a0d 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
 	if (rc)
 		return rc;
 
-	return sprintf(buf, "%u\n", reg & 1);
+	return sprintf(buf, "%u\n", reg);
 }
 static DEVICE_ATTR_RO(vga_pw);
 
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index cd818a629183..00e53de4812b 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
 {
 	struct drm_device *dev = hv_get_drvdata(hdev);
 	struct hyperv_drm_device *hv = to_hv(dev);
+	struct pci_dev *pdev;
 
 	drm_dev_unplug(dev);
 	drm_atomic_helper_shutdown(dev);
 	vmbus_close(hdev->channel);
 	hv_set_drvdata(hdev, NULL);
-	vmbus_free_mmio(hv->mem->start, hv->fb_size);
+
+	/*
+	 * Free allocated MMIO memory only on Gen2 VMs.
+	 * On Gen1 VMs, release the PCI device
+	 */
+	if (efi_enabled(EFI_BOOT)) {
+		vmbus_free_mmio(hv->mem->start, hv->fb_size);
+	} else {
+		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+				      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+		if (!pdev) {
+			drm_err(dev, "Unable to find PCI Hyper-V video\n");
+			return -ENODEV;
+		}
+		pci_release_region(pdev, 0);
+		pci_dev_put(pdev);
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b51d690f375f..88d262ba648c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2626,6 +2626,27 @@ nv174_chipset = {
 	.fifo = { 0x00000001, ga102_fifo_new },
 };
 
+static const struct nvkm_device_chip
+nv176_chipset = {
+	.name = "GA106",
+	.bar = { 0x00000001, tu102_bar_new },
+	.bios = { 0x00000001, nvkm_bios_new },
+	.devinit = { 0x00000001, ga100_devinit_new },
+	.fb = { 0x00000001, ga102_fb_new },
+	.gpio = { 0x00000001, ga102_gpio_new },
+	.i2c = { 0x00000001, gm200_i2c_new },
+	.imem = { 0x00000001, nv50_instmem_new },
+	.mc = { 0x00000001, ga100_mc_new },
+	.mmu = { 0x00000001, tu102_mmu_new },
+	.pci = { 0x00000001, gp100_pci_new },
+	.privring = { 0x00000001, gm200_privring_new },
+	.timer = { 0x00000001, gk20a_timer_new },
+	.top = { 0x00000001, ga100_top_new },
+	.disp = { 0x00000001, ga102_disp_new },
+	.dma = { 0x00000001, gv100_dma_new },
+	.fifo = { 0x00000001, ga102_fifo_new },
+};
+
 static const struct nvkm_device_chip
 nv177_chipset = {
 	.name = "GA107",
@@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		case 0x168: device->chip = &nv168_chipset; break;
 		case 0x172: device->chip = &nv172_chipset; break;
 		case 0x174: device->chip = &nv174_chipset; break;
+		case 0x176: device->chip = &nv176_chipset; break;
 		case 0x177: device->chip = &nv177_chipset; break;
 		default:
 			if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
index cdb1ead26d84..82b4c8e1457c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -207,11 +207,13 @@ int
 gm200_acr_wpr_parse(struct nvkm_acr *acr)
 {
 	const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
+	struct nvkm_acr_lsfw *lsfw;
 
 	while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
 		wpr_header_dump(&acr->subdev, hdr);
-		if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
-			return -ENOMEM;
+		lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
+		if (IS_ERR(lsfw))
+			return PTR_ERR(lsfw);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
index fb9132a39bb1..fd97a935a380 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -161,11 +161,13 @@ int
 gp102_acr_wpr_parse(struct nvkm_acr *acr)
 {
 	const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
+	struct nvkm_acr_lsfw *lsfw;
 
 	while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
 		wpr_header_v1_dump(&acr->subdev, hdr);
-		if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
-			return -ENOMEM;
+		lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
+		if (IS_ERR(lsfw))
+			return PTR_ERR(lsfw);
 	}
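The two ACR hunks above fix an error-contract mismatch: nvkm_acr_lsfw_add() reports failure with ERR_PTR(), so testing its result against NULL both misses real failures and invents -ENOMEM. (The vc4 hunk just below is the inverse correction, for a hook whose contract is NULL on failure.) The general pattern, as a minimal sketch with a hypothetical constructor:

```c
#include <linux/err.h>

struct widget *w = make_widget();	/* returns ERR_PTR() on failure */

if (IS_ERR(w))			/* not "if (!w)": ERR_PTR(-EINVAL) is non-NULL */
	return PTR_ERR(w);	/* propagate the encoded errno, not -ENOMEM */
```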
 
 	return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index fddaeb0b09c1..f642bd6e71ff 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -391,7 +391,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
 
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	bo->madv = VC4_MADV_WILLNEED;
 	refcount_set(&bo->usecnt, 0);
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 4b5ebeacd283..6561770f1af5 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -160,6 +160,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
 	if (usage) {
 		*old_keycode = usage->type == EV_KEY ?
 				usage->code : KEY_RESERVED;
+		usage->type = EV_KEY;
 		usage->code = ke->keycode;
 
 		clear_bit(*old_keycode, dev->keybit);
@@ -650,10 +651,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 				code += KEY_MACRO1;
 			else
 				code += BTN_TRIGGER_HAPPY - 0x1e;
-		} else {
-			goto ignore;
+			break;
 		}
-		break;
+		fallthrough;
 	default:
 		switch (field->physical) {
 		case HID_GD_MOUSE:
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 686788ebf3e1..d7687ce70614 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -256,8 +256,11 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
 		unsigned long now = jiffies;
 		int step_x = msc->touches[id].scroll_x - x;
 		int step_y = msc->touches[id].scroll_y - y;
-		int step_hr = ((64 - (int)scroll_speed) * msc->scroll_accel) /
-			      SCROLL_HR_STEPS;
+		int step_hr =
+			max_t(int,
+			      ((64 - (int)scroll_speed) * msc->scroll_accel) /
+					SCROLL_HR_STEPS,
+			      1);
 		int step_x_hr = msc->touches[id].scroll_x_hr - x;
 		int step_y_hr = msc->touches[id].scroll_y_hr - y;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 33a6908995b1..2a4cc39962e7 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2603,6 +2603,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev, return;
switch (equivalent_usage) { + case HID_DG_CONFIDENCE: + wacom_wac->hid_data.confidence = value; + break; case HID_GD_X: wacom_wac->hid_data.x = value; break; @@ -2635,7 +2638,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev, }
if (usage->usage_index + 1 == field->report_count) { - if (equivalent_usage == wacom_wac->hid_data.last_slot_field) + if (equivalent_usage == wacom_wac->hid_data.last_slot_field && + wacom_wac->hid_data.confidence) wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); } } @@ -2653,6 +2657,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
wacom_wac->is_invalid_bt_frame = false;
+ hid_data->confidence = true; + for (i = 0; i < report->maxfield; i++) { struct hid_field *field = report->field[i]; int j; diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 8b2d4e5b2303..466b62cc16dc 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -301,6 +301,7 @@ struct hid_data { bool barrelswitch; bool barrelswitch2; bool serialhi; + bool confidence; int x; int y; int pressure; diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c index f10a603b13fb..7b2474e6876f 100644 --- a/drivers/i2c/busses/i2c-virtio.c +++ b/drivers/i2c/busses/i2c-virtio.c @@ -106,11 +106,10 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
static int virtio_i2c_complete_reqs(struct virtqueue *vq, struct virtio_i2c_req *reqs, - struct i2c_msg *msgs, int num, - bool timedout) + struct i2c_msg *msgs, int num) { struct virtio_i2c_req *req; - bool failed = timedout; + bool failed = false; unsigned int len; int i, j = 0;
@@ -132,7 +131,7 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq, j++; }
- return timedout ? -ETIMEDOUT : j; + return j; }
static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, @@ -141,7 +140,6 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, struct virtio_i2c *vi = i2c_get_adapdata(adap); struct virtqueue *vq = vi->vq; struct virtio_i2c_req *reqs; - unsigned long time_left; int count;
reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL); @@ -164,11 +162,9 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, reinit_completion(&vi->completion); virtqueue_kick(vq);
- time_left = wait_for_completion_timeout(&vi->completion, adap->timeout); - if (!time_left) - dev_err(&adap->dev, "virtio i2c backend timeout.\n"); + wait_for_completion(&vi->completion);
- count = virtio_i2c_complete_reqs(vq, reqs, msgs, count, !time_left); + count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
err_free: kfree(reqs); diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index a9e568276c99..a45c5536d250 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -928,10 +928,8 @@ static int __init amd_iommu_v2_init(void) { int ret;
- pr_info("AMD IOMMUv2 driver by Joerg Roedel jroedel@suse.de\n"); - if (!amd_iommu_v2_supported()) { - pr_info("AMD IOMMUv2 functionality not available on this system\n"); + pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n"); /* * Load anyway to provide the symbols to other modules * which may use AMD IOMMUv2 optionally. @@ -946,6 +944,8 @@ static int __init amd_iommu_v2_init(void)
amd_iommu_register_ppr_notifier(&ppr_nb);
+ pr_info("AMD IOMMUv2 loaded and initialized\n"); + return 0;
out: diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 9a356075d345..78f8c8e6803e 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1226,13 +1226,11 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, pte = &pte[pfn_level_offset(pfn, level)];
do { - unsigned long level_pfn; + unsigned long level_pfn = pfn & level_mask(level);
if (!dma_pte_present(pte)) goto next;
- level_pfn = pfn & level_mask(level); - /* If range covers entire pagetable, free it */ if (start_pfn <= level_pfn && last_pfn >= level_pfn + level_size(level) - 1) { @@ -1253,7 +1251,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, freelist); } next: - pfn += level_size(level); + pfn = level_pfn + level_size(level); } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
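The pfn bookkeeping fix above is easy to get wrong by eye: when the starting pfn is not aligned to the entry size of the current level, advancing with pfn += level_size() drifts off the entry boundaries, and the pfn <= last_pfn loop bound can end the walk before the final entry is visited. A self-contained model of the walk (level_size()/level_mask() follow VT-d's nine-bits-per-level layout; the pte pointer bookkeeping is omitted):

    #include <stdio.h>

    /* VT-d style helpers: at level N an entry spans 512^(N-1) 4KiB pages */
    static unsigned long level_size(int level) { return 1UL << ((level - 1) * 9); }
    static unsigned long level_mask(int level) { return ~(level_size(level) - 1); }

    int main(void)
    {
        unsigned long start = 1000, last = 2000; /* unaligned range, level 2 */
        int level = 2;
        unsigned long pfn;

        printf("old advance (pfn += level_size): entry 1536 is skipped\n");
        pfn = start;
        do {
            printf("  entry base %lu\n", pfn & level_mask(level));
            pfn += level_size(level);
        } while (pfn <= last);

        printf("new advance (pfn = level_pfn + level_size):\n");
        pfn = start;
        do {
            unsigned long level_pfn = pfn & level_mask(level);

            printf("  entry base %lu\n", level_pfn);
            pfn = level_pfn + level_size(level);
        } while (pfn <= last);
        return 0;
    }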
if (first_pte) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 5cb260820eda..7f23ad61c094 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -200,8 +200,8 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte) #define DTE_HI_MASK2 GENMASK(7, 4) #define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */ #define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */ -#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36) -#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36)
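The swapped GENMASK_ULL ranges matter because each PAGE_DESC_HI_MASK must stay paired with the DTE_HI_SHIFT that moves those physical-address bits into the 32-bit descriptor. A hedged round-trip sketch (pack() and unpack() are illustrative stand-ins for the driver's packing helpers, not copies of them):

    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    #define DTE_HI_MASK1      GENMASK_ULL(11, 8)
    #define DTE_HI_MASK2      GENMASK_ULL(7, 4)
    #define DTE_HI_SHIFT1     24 /* descriptor bit 8 <-> phys bit 32 */
    #define DTE_HI_SHIFT2     32 /* descriptor bit 4 <-> phys bit 36 */
    #define PAGE_DESC_LO_MASK GENMASK_ULL(31, 12)

    /* pack a 40-bit, 4KiB-aligned address into a 32-bit descriptor */
    static uint32_t pack(uint64_t pa, uint64_t hi1, uint64_t hi2)
    {
        return (pa & PAGE_DESC_LO_MASK) |
               ((pa & hi1) >> DTE_HI_SHIFT1) |
               ((pa & hi2) >> DTE_HI_SHIFT2);
    }

    static uint64_t unpack(uint32_t dte)
    {
        uint64_t v = dte;

        return ((v & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
               ((v & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
               (v & PAGE_DESC_LO_MASK);
    }

    int main(void)
    {
        uint64_t pa = 0xAB12345000ULL;
        /* fixed pairing: bits 35:32 travel with SHIFT1, 39:36 with SHIFT2 */
        uint32_t good = pack(pa, GENMASK_ULL(35, 32), GENMASK_ULL(39, 36));
        /* the pre-patch pairing, masks swapped: the round trip corrupts */
        uint32_t bad = pack(pa, GENMASK_ULL(39, 36), GENMASK_ULL(35, 32));

        printf("fixed : %#llx\n", (unsigned long long)unpack(good));
        printf("broken: %#llx\n", (unsigned long long)unpack(bad));
        return 0;
    }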
static inline phys_addr_t rk_dte_pt_address_v2(u32 dte) { diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 79fa36de8a04..cd9cb354dc2c 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1199,6 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap, if (abort) dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT; msg->flags = dst->flags; + msg->sequence = dst->sequence; /* Remove it from the wait_queue */ list_del_init(&data->list);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 47aff3b19742..80aaf07b16f2 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -744,10 +744,6 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64, /* * x86 is the only compat architecture with different struct alignment * between 32-bit and 64-bit tasks. - * - * On all other architectures, v4l2_event32 and v4l2_event32_time32 are - * the same as v4l2_event and v4l2_event_time32, so we can use the native - * handlers, converting v4l2_event to v4l2_event_time32 if necessary. */ struct v4l2_event32 { __u32 type; @@ -765,21 +761,6 @@ struct v4l2_event32 { __u32 reserved[8]; };
-#ifdef CONFIG_COMPAT_32BIT_TIME -struct v4l2_event32_time32 { - __u32 type; - union { - compat_s64 value64; - __u8 data[64]; - } u; - __u32 pending; - __u32 sequence; - struct old_timespec32 timestamp; - __u32 id; - __u32 reserved[8]; -}; -#endif - static int put_v4l2_event32(struct v4l2_event *p64, struct v4l2_event32 __user *p32) { @@ -795,7 +776,22 @@ static int put_v4l2_event32(struct v4l2_event *p64, return 0; }
+#endif + #ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_event32_time32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + static int put_v4l2_event32_time32(struct v4l2_event *p64, struct v4l2_event32_time32 __user *p32) { @@ -811,7 +807,6 @@ static int put_v4l2_event32_time32(struct v4l2_event *p64, return 0; } #endif -#endif
struct v4l2_edid32 { __u32 pad; @@ -873,9 +868,7 @@ static int put_v4l2_edid32(struct v4l2_edid *p64, #define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32) #define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32) #define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32) -#ifdef CONFIG_X86_64 #define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32) -#endif #define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32) #endif
@@ -929,10 +922,10 @@ unsigned int v4l2_compat_translate_cmd(unsigned int cmd) #ifdef CONFIG_X86_64 case VIDIOC_DQEVENT32: return VIDIOC_DQEVENT; +#endif #ifdef CONFIG_COMPAT_32BIT_TIME case VIDIOC_DQEVENT32_TIME32: return VIDIOC_DQEVENT; -#endif #endif } return cmd; @@ -1025,10 +1018,10 @@ int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd) #ifdef CONFIG_X86_64 case VIDIOC_DQEVENT32: return put_v4l2_event32(parg, arg); +#endif #ifdef CONFIG_COMPAT_32BIT_TIME case VIDIOC_DQEVENT32_TIME32: return put_v4l2_event32_time32(parg, arg); -#endif #endif } return 0; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index e658f0174242..60f19369de84 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -300,7 +300,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES - | ESDHC_FLAG_CQHCI | ESDHC_FLAG_STATE_LOST_IN_LPMODE | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME, }; @@ -309,7 +308,6 @@ static struct esdhc_soc_data usdhc_imx8mm_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES - | ESDHC_FLAG_CQHCI | ESDHC_FLAG_STATE_LOST_IN_LPMODE, };
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 2d80a04e11d8..7728f26adb19 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -771,7 +771,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host, len -= offset; }
- BUG_ON(len > 65536); + /* + * The block layer forces a minimum segment size of PAGE_SIZE, + * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write + * multiple descriptors, noting that the ADMA table is sized + * for 4KiB chunks anyway, so it will be big enough. + */ + while (len > host->max_adma) { + int n = 32 * 1024; /* 32KiB */ + + __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); + addr += n; + len -= n; + }
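The replaced BUG_ON encodes a real limit: one ADMA2 descriptor cannot describe more than host->max_adma bytes (65536, or 65532 once the zero-length-descriptor quirk applies), yet the block layer can hand over bigger segments when PAGE_SIZE is 64KiB or larger. A userspace re-run of the splitting loop with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long addr = 0x80000000UL; /* assumed segment address */
        int len = 256 * 1024;              /* a segment from a 64KiB-page system */
        int max_adma = 65536;

        /* mirror of the loop added above: peel off 32KiB descriptors */
        while (len > max_adma) {
            int n = 32 * 1024;

            printf("desc: addr=%#lx len=%d\n", addr, n);
            addr += n;
            len -= n;
        }
        if (len)
            printf("desc: addr=%#lx len=%d (final)\n", addr, len);
        return 0;
    }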
/* tran, valid */ if (len) @@ -3952,6 +3964,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, * descriptor for each segment, plus 1 for a nop end descriptor. */ host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; + host->max_adma = 65536;
host->max_timeout_count = 0xE;
@@ -4617,10 +4630,12 @@ int sdhci_setup_host(struct sdhci_host *host) * be larger than 64 KiB though. */ if (host->flags & SDHCI_USE_ADMA) { - if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) + if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { + host->max_adma = 65532; /* 32-bit alignment */ mmc->max_seg_size = 65535; - else + } else { mmc->max_seg_size = 65536; + } } else { mmc->max_seg_size = mmc->max_req_size; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index e8d04e42a5af..6c689be3e48f 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -340,7 +340,8 @@ struct sdhci_adma2_64_desc {
/* * Maximum segments assuming a 512KiB maximum request size and a minimum - * 4KiB page size. + * 4KiB page size. Note this also leaves enough room for multiple descriptors + * in case of PAGE_SIZE >= 64KiB. */ #define SDHCI_MAX_SEGS 128
@@ -543,6 +544,7 @@ struct sdhci_host { unsigned int blocks; /* remaining PIO blocks */
int sg_count; /* Mapped sg entries */ + int max_adma; /* Max. length in ADMA descriptor */
void *adma_table; /* ADMA descriptor table */ void *align_buffer; /* Bounce buffer */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 5ebd96f6833d..526fb56c84f2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -985,6 +985,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; const struct hns3_reset_type_map *rst_type_map; + enum ethtool_reset_flags rst_flags; u32 i, size;
if (ops->ae_dev_resetting && ops->ae_dev_resetting(h)) @@ -1004,6 +1005,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) for (i = 0; i < size; i++) { if (rst_type_map[i].rst_flags == *flags) { rst_type = rst_type_map[i].rst_type; + rst_flags = rst_type_map[i].rst_flags; break; } } @@ -1019,6 +1021,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
ops->reset_event(h->pdev, h);
+ *flags &= ~rst_flags; + return 0; }
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 3b8bde58613a..fee7d9e79f8c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -703,9 +703,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) roundup_size = ilog2(roundup_size);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); + tc_valid[i] = 1; tc_size[i] = roundup_size; - tc_offset[i] = rss_size * i; + tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0; }
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 46312a4415ba..0ae6da2992d0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -305,6 +305,7 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26) #define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27) #define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28) +#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29)
/* OS defined structs */ struct net_device *netdev; @@ -398,6 +399,7 @@ int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); void iavf_schedule_reset(struct iavf_adapter *adapter); +void iavf_schedule_request_stats(struct iavf_adapter *adapter); void iavf_reset(struct iavf_adapter *adapter); void iavf_set_ethtool_ops(struct net_device *netdev); void iavf_update_stats(struct iavf_adapter *adapter); @@ -455,4 +457,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter); void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter); struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr); +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 144a77679359..0cecaff38d04 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -354,6 +354,9 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); unsigned int i;
+ /* Explicitly request stats refresh */ + iavf_schedule_request_stats(adapter); + iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock(); @@ -723,12 +726,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, * * Change the ITR settings for a specific queue. **/ -static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, - struct ethtool_coalesce *ec, int queue) +static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, + struct ethtool_coalesce *ec, int queue) { struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; struct iavf_q_vector *q_vector; + u16 itr_setting; + + itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->rx_coalesce_usecs != itr_setting && + ec->use_adaptive_rx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); + return -EINVAL; + } + + itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->tx_coalesce_usecs != itr_setting && + ec->use_adaptive_tx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); + return -EINVAL; + }
rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); @@ -751,6 +773,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, * the Tx and Rx ITR values based on the values we have entered * into the q_vector, no need to write the values now. */ + return 0; }
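The new checks above compare the requested usecs against the ring's current value with the adaptive-mode bit masked off, since that flag is stored in the same field as the ITR value. A small sketch of the masking (the flag value is shown for illustration only):

    #include <stdio.h>

    #define ITR_DYNAMIC 0x8000 /* illustrative adaptive-mode flag */

    /* refuse a manual usecs change while adaptive coalescing is on */
    static int check_itr(unsigned short ring_itr, unsigned int req_usecs,
                         int use_adaptive)
    {
        unsigned short itr_setting = ring_itr & ~ITR_DYNAMIC;

        if (req_usecs != itr_setting && use_adaptive)
            return -1; /* -EINVAL in the driver */
        return 0;
    }

    int main(void)
    {
        unsigned short ring = 50 | ITR_DYNAMIC; /* 50us, adaptive latched */

        printf("same value, adaptive on : %d\n", check_itr(ring, 50, 1));
        printf("new value, adaptive on  : %d\n", check_itr(ring, 80, 1));
        printf("new value, adaptive off : %d\n", check_itr(ring, 80, 0));
        return 0;
    }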
/** @@ -792,9 +815,11 @@ static int __iavf_set_coalesce(struct net_device *netdev, */ if (queue < 0) { for (i = 0; i < adapter->num_active_queues; i++) - iavf_set_itr_per_queue(adapter, ec, i); + if (iavf_set_itr_per_queue(adapter, ec, i)) + return -EINVAL; } else if (queue < adapter->num_active_queues) { - iavf_set_itr_per_queue(adapter, ec, queue); + if (iavf_set_itr_per_queue(adapter, ec, queue)) + return -EINVAL; } else { netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", adapter->num_active_queues - 1); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index aaf8a2f396e4..fd3717ae70ab 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -138,7 +138,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, * * Returns 0 on success, negative on failure **/ -static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) { unsigned int wait, delay = 10;
@@ -165,6 +165,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) } }
+/** + * iavf_schedule_request_stats - Set the flags and schedule statistics request + * @adapter: board private structure + * + * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly + * request and refresh ethtool stats + **/ +void iavf_schedule_request_stats(struct iavf_adapter *adapter) +{ + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +} + /** * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure @@ -695,13 +708,11 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) **/ static void iavf_restore_filters(struct iavf_adapter *adapter) { - /* re-add all VLAN filters */ - if (VLAN_ALLOWED(adapter)) { - u16 vid; + u16 vid;
- for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID) - iavf_add_vlan(adapter, vid); - } + /* re-add all VLAN filters */ + for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID) + iavf_add_vlan(adapter, vid); }
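iavf_restore_filters() now walks active_vlans unconditionally; for_each_set_bit() visits only the set positions of the bitmap. Roughly what the find-next-bit mechanics do on a single word (a 64-bit unsigned long is assumed):

    #include <stdio.h>

    int main(void)
    {
        unsigned long active_vlans = (1UL << 5) | (1UL << 42);
        unsigned long w = active_vlans;

        while (w) {
            int vid = __builtin_ctzl(w); /* index of the lowest set bit */

            printf("re-add VLAN %d\n", vid);
            w &= w - 1; /* clear that bit, jump to the next */
        }
        return 0;
    }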
/** @@ -736,9 +747,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter)) - return -EIO; - iavf_del_vlan(adapter, vid); clear_bit(vid, adapter->vsi.active_vlans);
@@ -1700,6 +1708,11 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_del_adv_rss_cfg(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { + iavf_request_stats(adapter); + return 0; + } + return -EAGAIN; }
@@ -2124,7 +2137,6 @@ static void iavf_reset_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f, *ftmp; - struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; u32 reg_val; int i = 0, err; @@ -2264,11 +2276,6 @@ static void iavf_reset_task(struct work_struct *work) list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; } - /* re-add all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->add = true; - } - spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* check if TCs are running and re-add all cloud filters */ @@ -2282,7 +2289,6 @@ static void iavf_reset_task(struct work_struct *work) spin_unlock_bh(&adapter->cloud_filter_list_lock);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter);
@@ -3380,11 +3386,16 @@ static int iavf_set_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev);
- /* Don't allow changing VLAN_RX flag when adapter is not capable - * of VLAN offload + /* Don't allow enabling VLAN features when adapter is not capable + * of VLAN offload/filtering */ if (!VLAN_ALLOWED(adapter)) { - if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER); + if (features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER)) return -EINVAL; } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 3c735968e1b8..08302ab35d68 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -607,7 +607,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) if (f->add) count++; } - if (!count) { + if (!count || !VLAN_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; @@ -673,9 +673,19 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
- list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->remove) + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we don't want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove && !VLAN_ALLOWED(adapter)) { + list_del(&f->list); + kfree(f); + } else if (f->remove) { count++; + } } if (!count) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; @@ -784,6 +794,8 @@ void iavf_request_stats(struct iavf_adapter *adapter) /* no error message, this isn't crucial */ return; } + + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ @@ -1722,8 +1734,37 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } spin_lock_bh(&adapter->mac_vlan_list_lock); iavf_add_filter(adapter, adapter->hw.mac.addr); + + if (VLAN_ALLOWED(adapter)) { + if (!list_empty(&adapter->vlan_filter_list)) { + struct iavf_vlan_filter *vlf; + + /* re-add all VLAN filters over virtchnl */ + list_for_each_entry(vlf, + &adapter->vlan_filter_list, + list) + vlf->add = true; + + adapter->aq_required |= + IAVF_FLAG_AQ_ADD_VLAN_FILTER; + } + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_process_config(adapter); + + /* unlock crit_lock before acquiring rtnl_lock as other + * processes holding rtnl_lock could be waiting for the same + * crit_lock + */ + mutex_unlock(&adapter->crit_lock); + rtnl_lock(); + netdev_update_features(adapter->netdev); + rtnl_unlock(); + if (iavf_lock_timeout(&adapter->crit_lock, 10000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", + __FUNCTION__); + } break; case VIRTCHNL_OP_ENABLE_QUEUES: diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index e47920fe73b8..62bf879dc623 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -83,8 +83,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rx_rings) goto err_rings;
- /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */ - vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq), + /* txq_map needs to have enough space to track both Tx (stack) rings + * and XDP rings; at this point vsi->num_xdp_txq might not be set, + * so use num_possible_cpus() as we want to always provide an XDP ring + * per CPU, regardless of queue count settings from the user that might + * have come from ethtool's set_channels() callback; + */ + vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a39136b0bd16..f622ee20ac40 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2497,7 +2497,18 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) ice_stat_str(status)); goto clear_xdp_rings; } - ice_vsi_assign_bpf_prog(vsi, prog); + + /* assign the prog only when it's not already present on the VSI; + * this path is shared by the ethtool -L and ndo_bpf flows; + * a VSI rebuild that happens under ethtool -L can expose us to + * bpf_prog refcount issues as we would be swapping the same + * bpf_prog pointer from vsi->xdp_prog and calling bpf_prog_put + * on it as it would be treated as an 'old_prog'; for ndo_bpf + * this is not harmful as dev_xdp_install bumps the refcount + * before calling the op exposed by the driver; + */ + if (!ice_is_xdp_ena_vsi(vsi)) + ice_vsi_assign_bpf_prog(vsi, prog);
return 0; clear_xdp_rings: @@ -2643,6 +2654,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); } else { + /* safe to call even when prog == vsi->xdp_prog as + * dev_xdp_install in net/core/dev.c incremented prog's + * refcount so corresponding bpf_prog_put won't cause + * underflow + */ ice_vsi_assign_bpf_prog(vsi, prog); }
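Both ice comments above are about the same hazard: ice_vsi_assign_bpf_prog() treats whatever was in vsi->xdp_prog as an old program and puts its reference, so re-assigning the already-installed prog during an ethtool -L rebuild would drop a reference that is still owned. A toy refcount model (assign()/prog_put() are simplified stand-ins, not the driver's code):

    #include <stdio.h>

    /* toy refcounted prog illustrating the double-put the new check avoids */
    struct prog { int refcnt; };

    static void prog_put(struct prog *p)
    {
        if (--p->refcnt == 0)
            printf("prog freed\n");
    }

    /* stand-in for ice_vsi_assign_bpf_prog(): installs new, puts the old */
    static void assign(struct prog **slot, struct prog *new)
    {
        struct prog *old = *slot;

        *slot = new;
        if (old)
            prog_put(old); /* when old == new this drops a live ref */
    }

    int main(void)
    {
        struct prog p = { .refcnt = 1 }; /* ref held by dev_xdp_install() */
        struct prog *vsi_xdp_prog = NULL;

        assign(&vsi_xdp_prog, &p); /* initial ndo_bpf install: fine */

        /* a VSI rebuild under ethtool -L would call assign() again with
         * the same prog; the guard added above skips it instead
         */
        if (!vsi_xdp_prog)
            assign(&vsi_xdp_prog, &p);
        else
            printf("prog already present, refcnt stays %d\n", p.refcnt);
        return 0;
    }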
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 751de06019a0..8f30577386b6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8019,7 +8019,7 @@ static int igb_poll(struct napi_struct *napi, int budget) if (likely(napi_complete_done(napi, work_done))) igb_ring_irq_enable(q_vector);
- return min(work_done, budget - 1); + return work_done; }
/** diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index d74d4966b13f..ed6d0c019573 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5017,11 +5017,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); }
+ if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { + netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", + mtu, (int)MVPP2_MAX_RX_BUF_SIZE); + return -EINVAL; + } + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { - if (port->xdp_prog) { - netdev_err(dev, "Jumbo frames are not supported with XDP\n"); - return -EINVAL; - } if (priv->percpu_pools) { netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); mvpp2_bm_switch_buffers(priv, false); @@ -5307,8 +5309,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) bool running = netif_running(port->dev); bool reset = !prog != !port->xdp_prog;
- if (port->dev->mtu > ETH_DATA_LEN) { - NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled"); + if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); return -EOPNOTSUPP; }
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 3ce6ccd0f539..b4599fe4ca8d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -497,8 +497,8 @@ int prestera_bridge_port_join(struct net_device *br_dev,
br_port = prestera_bridge_port_add(bridge, port->dev); if (IS_ERR(br_port)) { - err = PTR_ERR(br_port); - goto err_brport_create; + prestera_bridge_put(bridge); + return PTR_ERR(br_port); }
err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL, @@ -519,8 +519,6 @@ int prestera_bridge_port_join(struct net_device *br_dev, switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); err_switchdev_offload: prestera_bridge_port_put(br_port); -err_brport_create: - prestera_bridge_put(bridge); return err; }
@@ -1124,7 +1122,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused, prestera_port_obj_attr_set); break; default: - err = -EOPNOTSUPP; + return NOTIFY_DONE; }
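Returning NOTIFY_DONE for unhandled attributes matters because notifier_from_errno() wraps an errno with NOTIFY_STOP_MASK, which ends the notifier chain instead of letting other switchdev listeners see the event. A sketch using the <linux/notifier.h> values:

    #include <stdio.h>

    #define NOTIFY_DONE      0x0000 /* don't care */
    #define NOTIFY_OK        0x0001 /* suits me */
    #define NOTIFY_STOP_MASK 0x8000 /* don't call further callbacks */

    static int notifier_from_errno(int err)
    {
        if (err)
            return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
        return NOTIFY_OK;
    }

    int main(void)
    {
        /* before: an unhandled event became an error that stops the chain */
        printf("errno path: %#x\n",
               (unsigned)notifier_from_errno(-95 /* -EOPNOTSUPP */));
        /* after: NOTIFY_DONE leaves the event for other listeners */
        printf("done path : %#x\n", (unsigned)NOTIFY_DONE);
        return 0;
    }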
return notifier_from_errno(err); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 250c5a24264d..edfdd44de579 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2131,7 +2131,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, max_ports = mlxsw_core_max_ports(mlxsw_sp->core); local_port = mlxsw_reg_pude_local_port_get(pude_pl);
- if (WARN_ON_ONCE(local_port >= max_ports)) + if (WARN_ON_ONCE(!local_port || local_port >= max_ports)) return; mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 4d5a5d6595b3..d64ce65a3c17 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -914,8 +914,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter) }
static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, - u8 duplex, u16 local_adv, - u16 remote_adv) + u16 local_adv, u16 remote_adv) { struct lan743x_phy *phy = &adapter->phy; u8 cap; @@ -943,7 +942,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
phy_print_status(phydev); if (phydev->state == PHY_RUNNING) { - struct ethtool_link_ksettings ksettings; int remote_advertisement = 0; int local_advertisement = 0;
@@ -980,18 +978,14 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) } lan743x_csr_write(adapter, MAC_CR, data);
- memset(&ksettings, 0, sizeof(ksettings)); - phy_ethtool_get_link_ksettings(netdev, &ksettings); local_advertisement = linkmode_adv_to_mii_adv_t(phydev->advertising); remote_advertisement = linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
- lan743x_phy_update_flowcontrol(adapter, - ksettings.base.duplex, - local_advertisement, + lan743x_phy_update_flowcontrol(adapter, local_advertisement, remote_advertisement); - lan743x_ptp_update_latency(adapter, ksettings.base.speed); + lan743x_ptp_update_latency(adapter, phydev->speed); } }
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a08e4f530c1c..00b5e6860bf6 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1175,12 +1175,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: @@ -1299,7 +1293,10 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port, SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | BIT(HWTSTAMP_TX_ONESTEP_SYNC); - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index df203738511b..0b1865e9f0b5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -565,7 +565,6 @@ struct nfp_net_dp { * @exn_name: Name for Exception interrupt * @shared_handler: Handler for shared interrupts * @shared_name: Name for shared interrupt - * @me_freq_mhz: ME clock_freq (MHz) * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active, * @reconfig_sync_present and HW reconfiguration request * regs/machinery from async requests (sync must take @@ -650,8 +649,6 @@ struct nfp_net { irq_handler_t shared_handler; char shared_name[IFNAMSIZ + 8];
- u32 me_freq_mhz; - bool link_up; spinlock_t link_status_lock;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 0685ece1f155..be1a358baadb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1343,7 +1343,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev, * ME timestamp ticks. There are 16 ME clock cycles for each timestamp * count. */ - factor = nn->me_freq_mhz / 16; + factor = nn->tlv_caps.me_freq_mhz / 16;
/* Each pair of (usecs, max_frames) fields specifies that interrupts * should be coalesced until diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 43eead726886..5f129733aabd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -314,6 +314,7 @@ int stmmac_mdio_reset(struct mii_bus *mii); int stmmac_xpcs_setup(struct mii_bus *mii); void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags); void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_open(struct net_device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0ab20e2f984b..1cf94248c221 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -50,6 +50,13 @@ #include "dwxgmac2.h" #include "hwif.h"
+/* As long as the interface is active, we keep the timestamping counter enabled + * with fine resolution and binary rollover. This avoids non-monotonic behavior + * (clock jumps) when changing timestamping settings at runtime. + */ +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ + PTP_TCR_TSCTRLSSR) + #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -613,8 +620,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct stmmac_priv *priv = netdev_priv(dev); struct hwtstamp_config config; - struct timespec64 now; - u64 temp = 0; u32 ptp_v2 = 0; u32 tstamp_all = 0; u32 ptp_over_ipv4_udp = 0; @@ -623,11 +628,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) u32 snap_type_sel = 0; u32 ts_master_en = 0; u32 ts_event_en = 0; - u32 sec_inc = 0; - u32 value = 0; - bool xmac; - - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { netdev_alert(priv->dev, "No support for HW time stamping\n"); @@ -789,42 +789,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
- if (!priv->hwts_tx_en && !priv->hwts_rx_en) - stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); - else { - value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | - tstamp_all | ptp_v2 | ptp_over_ethernet | - ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | - ts_master_en | snap_type_sel); - stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); - - /* program Sub Second Increment reg */ - stmmac_config_sub_second_increment(priv, - priv->ptpaddr, priv->plat->clk_ptp_rate, - xmac, &sec_inc); - temp = div_u64(1000000000ULL, sec_inc); - - /* Store sub second increment and flags for later use */ - priv->sub_second_inc = sec_inc; - priv->systime_flags = value; - - /* calculate default added value: - * formula is : - * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = 1e9ns/sec_inc - */ - temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); - stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); - - /* initialize system time */ - ktime_get_real_ts64(&now); + priv->systime_flags = STMMAC_HWTS_ACTIVE;
- /* lower 32 bits of tv_sec are safe until y2106 */ - stmmac_init_systime(priv, priv->ptpaddr, - (u32)now.tv_sec, now.tv_nsec); + if (priv->hwts_tx_en || priv->hwts_rx_en) { + priv->systime_flags |= tstamp_all | ptp_v2 | + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_event_en | + ts_master_en | snap_type_sel; }
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); + memcpy(&priv->tstamp_config, &config, sizeof(config));
return copy_to_user(ifr->ifr_data, &config, @@ -852,6 +827,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) sizeof(*config)) ? -EFAULT : 0; }
+/** + * stmmac_init_tstamp_counter - init hardware timestamping counter + * @priv: driver private structure + * @systime_flags: timestamping flags + * Description: + * Initialize hardware counter for packet timestamping. + * This is valid as long as the interface is open and not suspended. + * It is rerun after resuming from suspend, in which case the timestamping + * flags updated by stmmac_hwtstamp_set() also need to be restored. + */ +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) +{ + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + struct timespec64 now; + u32 sec_inc = 0; + u64 temp = 0; + int ret; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) { + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + return ret; + } + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); + priv->systime_flags = systime_flags; + + /* program Sub Second Increment reg */ + stmmac_config_sub_second_increment(priv, priv->ptpaddr, + priv->plat->clk_ptp_rate, + xmac, &sec_inc); + temp = div_u64(1000000000ULL, sec_inc); + + /* Store sub second increment for later use */ + priv->sub_second_inc = sec_inc; + + /* calculate default added value: + * formula is: + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); + /** * stmmac_init_ptp - init PTP * @priv: driver private structure @@ -862,9 +897,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) static int stmmac_init_ptp(struct stmmac_priv *priv) { bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + int ret;
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) - return -EOPNOTSUPP; + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); + if (ret) + return ret;
priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ @@ -3268,10 +3305,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_mmc_setup(priv);
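The addend math that moved into stmmac_init_tstamp_counter() above can be sanity-checked by hand: the code computes addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate, so the 32-bit accumulator clocked at clk_ptp_rate overflows, adding sec_inc nanoseconds to the time counter, at exactly the assumed tick rate. A worked example with hypothetical board numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* assumed board values: counter ticks every 20ns (fine update),
         * PTP reference clock at 100MHz
         */
        uint64_t clk_ptp_rate = 100000000;
        uint32_t sec_inc = 20;

        uint64_t freq_div_ratio = 1000000000ULL / sec_inc;
        uint64_t addend = (freq_div_ratio << 32) / clk_ptp_rate;

        /* the accumulator overflows at clk * addend / 2^32 = 50MHz,
         * i.e. sec_inc is added to the time counter every 20ns
         */
        printf("addend = %#llx\n", (unsigned long long)addend);
        return 0;
    }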
if (init_ptp) { - ret = clk_prepare_enable(priv->plat->clk_ptp_ref); - if (ret < 0) - netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); - ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); @@ -3761,6 +3794,8 @@ int stmmac_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan;
+ netif_tx_disable(dev); + if (device_may_wakeup(priv->device)) phylink_speed_down(priv->phylink, false); /* Stop and disconnect the PHY */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 232ac98943cd..5d29f336315b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -816,7 +816,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (ret) return ret;
- clk_prepare_enable(priv->plat->clk_ptp_ref); + stmmac_init_tstamp_counter(priv, priv->systime_flags); }
return 0; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index cff51731195a..d57472ea077f 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -661,22 +661,6 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa) wait_for_completion(&ipa->completion); }
-void ipa_cmd_pipeline_clear(struct ipa *ipa) -{ - u32 count = ipa_cmd_pipeline_clear_count(); - struct gsi_trans *trans; - - trans = ipa_cmd_trans_alloc(ipa, count); - if (trans) { - ipa_cmd_pipeline_clear_add(trans); - gsi_trans_commit_wait(trans); - ipa_cmd_pipeline_clear_wait(ipa); - } else { - dev_err(&ipa->pdev->dev, - "error allocating %u entry tag transaction\n", count); - } -} - static struct ipa_cmd_info * ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) { diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 69cd085d427d..05ed7e42e184 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -163,12 +163,6 @@ u32 ipa_cmd_pipeline_clear_count(void); */ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
-/** - * ipa_cmd_pipeline_clear() - Clear the hardware pipeline - * @ipa: - IPA pointer - */ -void ipa_cmd_pipeline_clear(struct ipa *ipa); - /** * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint * @ipa: IPA pointer diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index ef790fd0ab56..03a170993420 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1636,8 +1636,6 @@ void ipa_endpoint_suspend(struct ipa *ipa) if (ipa->modem_netdev) ipa_modem_suspend(ipa->modem_netdev);
- ipa_cmd_pipeline_clear(ipa); - ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); } diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index cdfa98a76e1f..a448ec198bee 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -28,6 +28,7 @@ #include "ipa_reg.h" #include "ipa_mem.h" #include "ipa_table.h" +#include "ipa_smp2p.h" #include "ipa_modem.h" #include "ipa_uc.h" #include "ipa_interrupt.h" @@ -801,6 +802,11 @@ static int ipa_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; int ret;
+ /* Prevent the modem from triggering a call to ipa_setup(). This + * also ensures a modem-initiated setup that's underway completes. + */ + ipa_smp2p_irq_disable_setup(ipa); + ret = pm_runtime_get_sync(dev); if (WARN_ON(ret < 0)) goto out_power_put; diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index ad116bcc0580..d0ab4d70c303 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -339,9 +339,6 @@ int ipa_modem_stop(struct ipa *ipa) if (state != IPA_MODEM_STATE_RUNNING) return -EBUSY;
- /* Prevent the modem from triggering a call to ipa_setup() */ - ipa_smp2p_disable(ipa); - /* Clean up the netdev and endpoints if it was started */ if (netdev) { struct ipa_priv *priv = netdev_priv(netdev); @@ -369,6 +366,9 @@ static void ipa_modem_crashed(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; int ret;
+ /* Prevent the modem from triggering a call to ipa_setup() */ + ipa_smp2p_irq_disable_setup(ipa); + ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "error %d getting power to handle crash\n", ret); diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index df7639c39d71..211233612039 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -53,7 +53,7 @@ * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready * @power_on: Whether IPA power is on * @notified: Whether modem has been notified of power state - * @disabled: Whether setup ready interrupt handling is disabled + * @setup_disabled: Whether setup ready interrupt handler is disabled * @mutex: Mutex protecting ready-interrupt/shutdown interlock * @panic_notifier: Panic notifier structure */ @@ -67,7 +67,7 @@ struct ipa_smp2p { u32 setup_ready_irq; bool power_on; bool notified; - bool disabled; + bool setup_disabled; struct mutex mutex; struct notifier_block panic_notifier; }; @@ -155,11 +155,9 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) struct device *dev; int ret;
- mutex_lock(&smp2p->mutex); - - if (smp2p->disabled) - goto out_mutex_unlock; - smp2p->disabled = true; /* If any others arrive, ignore them */ + /* Ignore any (spurious) interrupts received after the first */ + if (smp2p->ipa->setup_complete) + return IRQ_HANDLED;
/* Power needs to be active for setup */ dev = &smp2p->ipa->pdev->dev; @@ -176,8 +174,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) out_power_put: pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); -out_mutex_unlock: - mutex_unlock(&smp2p->mutex);
return IRQ_HANDLED; } @@ -313,7 +309,7 @@ void ipa_smp2p_exit(struct ipa *ipa) kfree(smp2p); }
-void ipa_smp2p_disable(struct ipa *ipa) +void ipa_smp2p_irq_disable_setup(struct ipa *ipa) { struct ipa_smp2p *smp2p = ipa->smp2p;
@@ -322,7 +318,10 @@ void ipa_smp2p_disable(struct ipa *ipa)
mutex_lock(&smp2p->mutex);
- smp2p->disabled = true; + if (!smp2p->setup_disabled) { + disable_irq(smp2p->setup_ready_irq); + smp2p->setup_disabled = true; + }
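The setup_disabled flag is not just bookkeeping: disable_irq() nests, so if both the remove() and modem-crash paths called it, two enable_irq() calls would be needed before the line could fire again. A toy depth counter showing why the guard keeps things balanced:

    #include <stdio.h>

    static int irq_depth; /* toy model of the kernel's per-irq disable depth */

    static void disable_irq_toy(void) { irq_depth++; }
    static void enable_irq_toy(void) { if (irq_depth) irq_depth--; }

    static int setup_disabled;

    static void irq_disable_setup(void)
    {
        if (!setup_disabled) {
            disable_irq_toy();
            setup_disabled = 1;
        }
    }

    int main(void)
    {
        /* both the remove() and modem-crash paths may get here */
        irq_disable_setup();
        irq_disable_setup();

        enable_irq_toy(); /* a single enable now fully re-arms the line */
        printf("disable depth after enable: %d\n", irq_depth);
        return 0;
    }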
mutex_unlock(&smp2p->mutex); } diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index 99a956789638..59cee31a7383 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -27,13 +27,12 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init); void ipa_smp2p_exit(struct ipa *ipa);
/** - * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling + * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt * @ipa: IPA pointer * - * Prevent handling of the "setup ready" interrupt from the modem. - * This is used before initiating shutdown of the driver. + * Disable the "ipa-setup-ready" interrupt from the modem. */ -void ipa_smp2p_disable(struct ipa *ipa); +void ipa_smp2p_irq_disable_setup(struct ipa *ipa);
/** * ipa_smp2p_notify_reset() - Reset modem notification state diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index cad820568f75..966c3b4ad59d 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + !(ctrl & ASPEED_MDIO_CTRL_FIRE), + ASPEED_MDIO_INTERVAL_US, + ASPEED_MDIO_TIMEOUT_US); + if (rc < 0) + return rc; + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, data & ASPEED_MDIO_DATA_IDLE, ASPEED_MDIO_INTERVAL_US, diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 7ec3105010ac..fef1416dcee4 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -657,6 +657,7 @@ static void phylink_resolve(struct work_struct *w) struct phylink_link_state link_state; struct net_device *ndev = pl->netdev; bool mac_config = false; + bool retrigger = false; bool cur_link_state;
mutex_lock(&pl->state_mutex); @@ -670,6 +671,7 @@ static void phylink_resolve(struct work_struct *w) link_state.link = false; } else if (pl->mac_link_dropped) { link_state.link = false; + retrigger = true; } else { switch (pl->cur_link_an_mode) { case MLO_AN_PHY: @@ -686,6 +688,19 @@ static void phylink_resolve(struct work_struct *w) case MLO_AN_INBAND: phylink_mac_pcs_get_state(pl, &link_state);
+ /* The PCS may have a latching link-fail indicator. + * If the link was up, bring the link down and + * re-trigger the resolve. Otherwise, re-read the + * PCS state to get the current status of the link. + */ + if (!link_state.link) { + if (cur_link_state) + retrigger = true; + else + phylink_mac_pcs_get_state(pl, + &link_state); + } + /* If we have a phy, the "up" state is the union of * both the PHY and the MAC */ @@ -694,6 +709,15 @@ static void phylink_resolve(struct work_struct *w)
/* Only update if the PHY link is up */ if (pl->phydev && pl->phy_state.link) { + /* If the interface has changed, force a + * link down event if the link isn't already + * down, and re-resolve. + */ + if (link_state.interface != + pl->phy_state.interface) { + retrigger = true; + link_state.link = false; + } link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with @@ -736,7 +760,7 @@ static void phylink_resolve(struct work_struct *w) else phylink_link_up(pl, link_state); } - if (!link_state.link && pl->mac_link_dropped) { + if (!link_state.link && retrigger) { pl->mac_link_dropped = false; queue_work(system_power_efficient_wq, &pl->resolve); } diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 26b1bd8e845b..f91dabd65ecd 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1049,6 +1049,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_set_features = smsc95xx_set_features, };
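The retrigger handling above deals with latching link-fail indicators of the kind PHY/PCS status registers commonly have: a past drop reads as down once, then the latch clears. A toy latched register showing the resulting read-twice pattern (simplified from what phylink_resolve() actually does, which retriggers instead when the link was believed up):

    #include <stdio.h>

    static int live_link = 1;    /* the link is actually up */
    static int latched_fail = 1; /* a transient drop was latched */

    static int read_link(void)
    {
        if (latched_fail) {
            latched_fail = 0; /* reading clears the latch */
            return 0;
        }
        return live_link;
    }

    int main(void)
    {
        int link = read_link();

        /* the first reading reports the stale failure; re-read for the
         * current state instead of believing the latch
         */
        if (!link)
            link = read_link();
        printf("resolved link state: %s\n", link ? "up" : "down");
        return 0;
    }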
+static void smsc95xx_handle_link_change(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + phy_print_status(net->phydev); + usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); +} + static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata; @@ -1153,6 +1161,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->min_mtu = ETH_MIN_MTU; dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + ret = phy_connect_direct(dev->net, pdata->phydev, + &smsc95xx_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id); + goto unregister_mdio; + } + + phy_attached_info(dev->net->phydev); + return 0;
unregister_mdio: @@ -1170,47 +1189,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata = dev->driver_priv;
+ phy_disconnect(dev->net->phydev); mdiobus_unregister(pdata->mdiobus); mdiobus_free(pdata->mdiobus); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); }
-static void smsc95xx_handle_link_change(struct net_device *net) -{ - struct usbnet *dev = netdev_priv(net); - - phy_print_status(net->phydev); - usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); -} - static int smsc95xx_start_phy(struct usbnet *dev) { - struct smsc95xx_priv *pdata = dev->driver_priv; - struct net_device *net = dev->net; - int ret; + phy_start(dev->net->phydev);
- ret = smsc95xx_reset(dev); - if (ret < 0) - return ret; - - ret = phy_connect_direct(net, pdata->phydev, - &smsc95xx_handle_link_change, - PHY_INTERFACE_MODE_MII); - if (ret) { - netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id); - return ret; - } - - phy_attached_info(net->phydev); - phy_start(net->phydev); return 0; }
-static int smsc95xx_disconnect_phy(struct usbnet *dev) +static int smsc95xx_stop(struct usbnet *dev) { - phy_stop(dev->net->phydev); - phy_disconnect(dev->net->phydev); + if (dev->net->phydev) + phy_stop(dev->net->phydev); + return 0; }
@@ -1965,7 +1962,7 @@ static const struct driver_info smsc95xx_info = { .unbind = smsc95xx_unbind, .link_reset = smsc95xx_link_reset, .reset = smsc95xx_start_phy, - .stop = smsc95xx_disconnect_phy, + .stop = smsc95xx_stop, .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 1dd1a0fe2e81..df7e033dd273 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -8,6 +8,7 @@ #include <linux/uio.h> #include <linux/falloc.h> #include <linux/file.h> +#include <linux/fs.h> #include "nvmet.h"
#define NVMET_MAX_MPOOL_BVEC 16 @@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
if (req->ns->buffered_io) { if (likely(!req->f.mpool_alloc) && - nvmet_file_execute_io(req, IOCB_NOWAIT)) + (req->ns->file->f_mode & FMODE_NOWAIT) && + nvmet_file_execute_io(req, IOCB_NOWAIT)) return; nvmet_file_submit_buffered_io(req); } else diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 84c387e4bf43..2b8bab28417b 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -700,10 +700,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct nvmet_tcp_queue *queue = cmd->queue; + int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, - .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset + .iov_len = left }; int ret;
@@ -717,6 +718,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) return ret;
cmd->offset += ret; + left -= ret; + + if (left) + return -EAGAIN;
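The ddgst change applies the usual short-write discipline: kernel_sendmsg() may take fewer bytes than requested, so the sender has to advance its offset and report -EAGAIN rather than treat a partial send as completion. A userspace sketch with a hypothetical send_some() that drains two bytes per call (NVMe/TCP digests are 4 bytes):

    #include <stdio.h>

    static int send_some(const char *buf, int len)
    {
        (void)buf;
        return len > 2 ? 2 : len; /* pretend the socket took 2 bytes */
    }

    int main(void)
    {
        char ddgst[4] = { 'a', 'b', 'c', 'd' }; /* stand-in for the digest */
        int offset = 0;

        for (;;) {
            int left = (int)sizeof(ddgst) - offset;
            int ret = send_some(ddgst + offset, left);

            offset += ret;
            left -= ret;
            if (!left)
                break; /* whole digest is on the wire */
            /* the fix: resume later from 'offset' via -EAGAIN */
            printf("partial send, %d bytes left -> -EAGAIN\n", left);
        }
        printf("digest fully sent after %d bytes\n", offset);
        return 0;
    }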
if (queue->nvme_sq.sqhd_disabled) { cmd->queue->snd_cmd = NULL; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index b051d127f0af..c5300d49807a 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -299,11 +299,6 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg) return readl(pcie->base + reg); }
-static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg) -{ - return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8); -} - static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie) { u32 val; @@ -377,23 +372,9 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
static void advk_pcie_issue_perst(struct advk_pcie *pcie) { - u32 reg; - if (!pcie->reset_gpio) return;
- /* - * As required by PCI Express spec (PCI Express Base Specification, REV. - * 4.0 PCI Express, February 19 2014, 6.6.1 Conventional Reset) a delay - * for at least 100ms after de-asserting PERST# signal is needed before - * link training is enabled. So ensure that link training is disabled - * prior de-asserting PERST# signal to fulfill that PCI Express spec - * requirement. - */ - reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); - reg &= ~LINK_TRAINING_EN; - advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); - /* 10ms delay is needed for some cards */ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n"); gpiod_set_value_cansleep(pcie->reset_gpio, 1); @@ -401,53 +382,46 @@ static void advk_pcie_issue_perst(struct advk_pcie *pcie) gpiod_set_value_cansleep(pcie->reset_gpio, 0); }
-static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen) +static void advk_pcie_train_link(struct advk_pcie *pcie) { - int ret, neg_gen; + struct device *dev = &pcie->pdev->dev; u32 reg; + int ret;
- /* Setup link speed */ + /* + * Set up PCIe rev / gen compliance based on the device tree property + * 'max-link-speed', which also forces the maximal link speed. + */ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); reg &= ~PCIE_GEN_SEL_MSK; - if (gen == 3) + if (pcie->link_gen == 3) reg |= SPEED_GEN_3; - else if (gen == 2) + else if (pcie->link_gen == 2) reg |= SPEED_GEN_2; else reg |= SPEED_GEN_1; advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

/* - * Enable link training. This is not needed in every call to this - * function, just once suffices, but it does not break anything either. + * Also set the maximal link speed value in the PCIe Link Control 2 register. + * The Armada 3700 Functional Specification says the default value is based + * on SPEED_GEN, but tests showed the default is always 8.0 GT/s. */ + reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2); + reg &= ~PCI_EXP_LNKCTL2_TLS; + if (pcie->link_gen == 3) + reg |= PCI_EXP_LNKCTL2_TLS_8_0GT; + else if (pcie->link_gen == 2) + reg |= PCI_EXP_LNKCTL2_TLS_5_0GT; + else + reg |= PCI_EXP_LNKCTL2_TLS_2_5GT; + advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2); + + /* Enable link training after selecting PCIe generation */ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); reg |= LINK_TRAINING_EN; advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
- /* - * Start link training immediately after enabling it. - * This solves problems for some buggy cards. - */ - reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL); - reg |= PCI_EXP_LNKCTL_RL; - advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL); - - ret = advk_pcie_wait_for_link(pcie); - if (ret) - return ret; - - reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA); - neg_gen = reg & PCI_EXP_LNKSTA_CLS; - - return neg_gen; -} - -static void advk_pcie_train_link(struct advk_pcie *pcie) -{ - struct device *dev = &pcie->pdev->dev; - int neg_gen = -1, gen; - /* * Reset PCIe card via PERST# signal. Some cards are not detected * during link training when they are in some non-initial state. @@ -458,41 +432,18 @@ static void advk_pcie_train_link(struct advk_pcie *pcie) * PERST# signal could have been asserted by pinctrl subsystem before * probe() callback has been called or issued explicitly by reset gpio * function advk_pcie_issue_perst(), making the endpoint go into - fundamental reset. As required by PCI Express spec a delay for at - * least 100ms after such a reset before link training is needed. - */ - msleep(PCI_PM_D3COLD_WAIT); - - /* - * Try link training at link gen specified by device tree property - * 'max-link-speed'. If this fails, iteratively train at lower gen. - */ - for (gen = pcie->link_gen; gen > 0; --gen) { - neg_gen = advk_pcie_train_at_gen(pcie, gen); - if (neg_gen > 0) - break; - } - - if (neg_gen < 0) - goto err; - - /* - * After successful training if negotiated gen is lower than requested, - * train again on negotiated gen. This solves some stability issues for - * some buggy gen1 cards. + fundamental reset. As required by PCI Express spec (PCI Express + * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1 + * Conventional Reset) a delay of at least 100ms after such a reset + * is needed before sending a Configuration Request to the device. + * So wait until the PCIe link is up. Function advk_pcie_wait_for_link() + * waits for the link for at least 900ms. */ - if (neg_gen < gen) { - gen = neg_gen; - neg_gen = advk_pcie_train_at_gen(pcie, gen); - } - - if (neg_gen == gen) { - dev_info(dev, "link up at gen %i\n", gen); - return; - } - -err: - dev_err(dev, "link never came up\n"); + ret = advk_pcie_wait_for_link(pcie); + if (ret < 0) + dev_err(dev, "link never came up\n"); + else + dev_info(dev, "link up\n"); }
/* @@ -692,6 +643,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3 u32 reg; unsigned int status; char *strcomp_status, *str_posted; + int ret;
 	reg = advk_readl(pcie, PIO_STAT);
 	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
@@ -716,6 +668,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 	case PIO_COMPLETION_STATUS_OK:
 		if (reg & PIO_ERR_STATUS) {
 			strcomp_status = "COMP_ERR";
+			ret = -EFAULT;
 			break;
 		}
 		/* Get the read result */
@@ -723,9 +676,11 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 			*val = advk_readl(pcie, PIO_RD_DATA);
 		/* No error */
 		strcomp_status = NULL;
+		ret = 0;
 		break;
 	case PIO_COMPLETION_STATUS_UR:
 		strcomp_status = "UR";
+		ret = -EOPNOTSUPP;
 		break;
 	case PIO_COMPLETION_STATUS_CRS:
 		if (allow_crs && val) {
@@ -743,6 +698,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 			 */
 			*val = CFG_RD_CRS_VAL;
 			strcomp_status = NULL;
+			ret = 0;
 			break;
 		}
 		/* PCIe r4.0, sec 2.3.2, says:
@@ -758,21 +714,24 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
 		 * Request and taking appropriate action, e.g., complete the
 		 * Request to the host as a failed transaction.
 		 *
-		 * To simplify implementation do not re-issue the Configuration
-		 * Request and complete the Request as a failed transaction.
+		 * So return -EAGAIN and the caller (the pci-aardvark driver)
+		 * will re-issue the request, up to PIO_RETRY_CNT retries.
 		 */
 		strcomp_status = "CRS";
+		ret = -EAGAIN;
 		break;
 	case PIO_COMPLETION_STATUS_CA:
 		strcomp_status = "CA";
+		ret = -ECANCELED;
 		break;
 	default:
 		strcomp_status = "Unknown";
+		ret = -EINVAL;
 		break;
 	}
if (!strcomp_status) - return 0; + return ret;
if (reg & PIO_NON_POSTED_REQ) str_posted = "Non-posted"; @@ -782,7 +741,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3 dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n", str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
- return -EFAULT; + return ret; }
static int advk_pcie_wait_pio(struct advk_pcie *pcie) @@ -790,13 +749,13 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie) struct device *dev = &pcie->pdev->dev; int i;
- for (i = 0; i < PIO_RETRY_CNT; i++) { + for (i = 1; i <= PIO_RETRY_CNT; i++) { u32 start, isr;
start = advk_readl(pcie, PIO_START); isr = advk_readl(pcie, PIO_ISR); if (!start && isr) - return 0; + return i; udelay(PIO_RETRY_DELAY); }
@@ -984,7 +943,6 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) { struct pci_bridge_emul *bridge = &pcie->bridge; - int ret;
bridge->conf.vendor = cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff); @@ -1004,19 +962,14 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) /* Support interrupt A for MSI feature */ bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+	/* Indicates support for Completion Retry Status */
+	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
 	bridge->has_pcie = true;
 	bridge->data = pcie;
 	bridge->ops = &advk_pci_bridge_emul_ops;
- /* PCIe config space can be initialized after pci_bridge_emul_init() */ - ret = pci_bridge_emul_init(bridge, 0); - if (ret < 0) - return ret; - - /* Indicates supports for Completion Retry Status */ - bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); - - return 0; + return pci_bridge_emul_init(bridge, 0); }
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, @@ -1068,6 +1021,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct advk_pcie *pcie = bus->sysdata; + int retry_count; bool allow_crs; u32 reg; int ret; @@ -1090,18 +1044,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE);
- if (advk_pcie_pio_is_running(pcie)) { - /* - * If it is possible return Completion Retry Status so caller - * tries to issue the request again instead of failing. - */ - if (allow_crs) { - *val = CFG_RD_CRS_VAL; - return PCIBIOS_SUCCESSFUL; - } - *val = 0xffffffff; - return PCIBIOS_SET_FAILED; - } + if (advk_pcie_pio_is_running(pcie)) + goto try_crs;
/* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); @@ -1120,30 +1064,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, /* Program the data strobe */ advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
- /* Clear PIO DONE ISR and start the transfer */ - advk_writel(pcie, 1, PIO_ISR); - advk_writel(pcie, 1, PIO_START); + retry_count = 0; + do { + /* Clear PIO DONE ISR and start the transfer */ + advk_writel(pcie, 1, PIO_ISR); + advk_writel(pcie, 1, PIO_START);
- ret = advk_pcie_wait_pio(pcie); - if (ret < 0) { - /* - * If it is possible return Completion Retry Status so caller - * tries to issue the request again instead of failing. - */ - if (allow_crs) { - *val = CFG_RD_CRS_VAL; - return PCIBIOS_SUCCESSFUL; - } - *val = 0xffffffff; - return PCIBIOS_SET_FAILED; - } + ret = advk_pcie_wait_pio(pcie); + if (ret < 0) + goto try_crs;
- /* Check PIO status and get the read result */ - ret = advk_pcie_check_pio_status(pcie, allow_crs, val); - if (ret < 0) { - *val = 0xffffffff; - return PCIBIOS_SET_FAILED; - } + retry_count += ret; + + /* Check PIO status and get the read result */ + ret = advk_pcie_check_pio_status(pcie, allow_crs, val); + } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT); + + if (ret < 0) + goto fail;
if (size == 1) *val = (*val >> (8 * (where & 3))) & 0xff; @@ -1151,6 +1089,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, *val = (*val >> (8 * (where & 3))) & 0xffff;
return PCIBIOS_SUCCESSFUL; + +try_crs: + /* + * If it is possible, return Completion Retry Status so that caller + * tries to issue the request again instead of failing. + */ + if (allow_crs) { + *val = CFG_RD_CRS_VAL; + return PCIBIOS_SUCCESSFUL; + } + +fail: + *val = 0xffffffff; + return PCIBIOS_SET_FAILED; }
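Together with advk_pcie_wait_pio() now reporting how many polling iterations it consumed (1..PIO_RETRY_CNT) instead of returning 0, the loop above bounds the total work: every CRS-triggered restart (-EAGAIN) is charged against one shared budget. A hedged user-space model of the pattern; all names are illustrative and the budget value is assumed:

	#include <errno.h>

	#define RETRY_BUDGET 750	/* stands in for PIO_RETRY_CNT (value assumed) */

	/*
	 * start_transfer() returns the number of polling iterations spent
	 * (> 0) once the hardware completes, or a negative errno if it
	 * never does. check_status() returns 0 on success, -EAGAIN for
	 * "completer asks us to retry", or another negative errno for a
	 * hard failure.
	 */
	static int transfer_with_budget(int (*start_transfer)(void),
					int (*check_status)(void))
	{
		int spent = 0;
		int ret;

		do {
			ret = start_transfer();
			if (ret < 0)
				return ret;	/* hardware timeout */
			spent += ret;		/* charge the polling cost */
			ret = check_status();
		} while (ret == -EAGAIN && spent < RETRY_BUDGET);

		return ret;			/* 0, or the last hard error */
	}

Accumulating the iteration counts, rather than counting restarts, keeps the worst-case latency the same as a single full PIO_RETRY_CNT poll.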
static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, @@ -1159,6 +1111,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, struct advk_pcie *pcie = bus->sysdata; u32 reg; u32 data_strobe = 0x0; + int retry_count; int offset; int ret;
@@ -1200,19 +1153,22 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, /* Program the data strobe */ advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
- /* Clear PIO DONE ISR and start the transfer */ - advk_writel(pcie, 1, PIO_ISR); - advk_writel(pcie, 1, PIO_START); + retry_count = 0; + do { + /* Clear PIO DONE ISR and start the transfer */ + advk_writel(pcie, 1, PIO_ISR); + advk_writel(pcie, 1, PIO_START);
- ret = advk_pcie_wait_pio(pcie); - if (ret < 0) - return PCIBIOS_SET_FAILED; + ret = advk_pcie_wait_pio(pcie); + if (ret < 0) + return PCIBIOS_SET_FAILED;
- ret = advk_pcie_check_pio_status(pcie, false, NULL); - if (ret < 0) - return PCIBIOS_SET_FAILED; + retry_count += ret;
- return PCIBIOS_SUCCESSFUL; + ret = advk_pcie_check_pio_status(pcie, false, NULL); + } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT); + + return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL; }
static struct pci_ops advk_pcie_ops = { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 27eb652b564f..81dab9b82f79 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -639,8 +639,8 @@ static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc) mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP; current_time = ktime_get_real(); TimeStamp = ktime_to_ms(current_time); - mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF); - mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32); + mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32); + mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF); init_completion(&ioc->scsih_cmds.done); ioc->put_smid_default(ioc, smid); dinitprintk(ioc, ioc_info(ioc, diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index f87c0911f66a..1b3a44ce65aa 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -142,6 +142,8 @@
#define MPT_MAX_CALLBACKS 32
+#define MPT_MAX_HBA_NUM_PHYS 32
+
 #define INTERNAL_CMDS_COUNT	10	/* reserved cmds */
 /* reserved for issuing internally framed scsi io cmds */
 #define INTERNAL_SCSIIO_CMDS_COUNT	3
@@ -798,6 +800,7 @@ struct _sas_phy {
  * @enclosure_handle: handle for this a member of an enclosure
  * @device_info: bitwise defining capabilities of this sas_host/expander
  * @responding: used in _scsih_expander_device_mark_responding
+ * @nr_phys_allocated: number of phys for which memory has been allocated
  * @phy: a list of phys that make up this sas_host/expander
  * @sas_port_list: list of ports attached to this sas_host/expander
  * @port: hba port entry containing node's port number info
@@ -813,6 +816,7 @@ struct _sas_node {
 	u16 enclosure_handle;
 	u64 enclosure_logical_id;
 	u8 responding;
+	u8 nr_phys_allocated;
 	struct hba_port *port;
 	struct _sas_phy *phy;
 	struct list_head sas_port_list;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index ad1b6c2b37a7..c1f900c6ea00 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3869,7 +3869,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
shost_for_each_device(sdev, ioc->shost) { sas_device_priv_data = sdev->hostdata; - if (!sas_device_priv_data) + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; if (sas_device_priv_data->sas_target->sas_address != sas_address) @@ -6406,11 +6406,26 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) int i, j, count = 0, lcount = 0; int ret; u64 sas_addr; + u8 num_phys;
drsprintk(ioc, ioc_info(ioc, "updating ports for sas_host(0x%016llx)\n", (unsigned long long)ioc->sas_hba.sas_address));
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if (num_phys > ioc->sas_hba.nr_phys_allocated) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + port_table = kcalloc(ioc->sas_hba.num_phys, sizeof(struct hba_port), GFP_KERNEL); if (!port_table) @@ -6611,6 +6626,30 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) ioc->sas_hba.phy[i].hba_vphy = 1; }
+ /* + * Add new HBA phys to STL if these new phys got added as part + * of HBA Firmware upgrade/downgrade operation. + */ + if (!ioc->sas_hba.phy[i].phy) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, + &phy_pg0, i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + mpt3sas_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], phy_pg0, + ioc->sas_hba.parent_dev); + continue; + } ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. AttachedDevHandle); @@ -6622,6 +6661,19 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) attached_handle, i, link_rate, ioc->sas_hba.phy[i].port); } + /* + * Clear the phy details if this phy got disabled as part of + * HBA Firmware upgrade/downgrade operation. + */ + for (i = ioc->sas_hba.num_phys; + i < ioc->sas_hba.nr_phys_allocated; i++) { + if (ioc->sas_hba.phy[i].phy && + ioc->sas_hba.phy[i].phy->negotiated_linkrate >= + SAS_LINK_RATE_1_5_GBPS) + mpt3sas_transport_update_links(ioc, + ioc->sas_hba.sas_address, 0, i, + MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL); + } out: kfree(sas_iounit_pg0); } @@ -6654,7 +6706,10 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) __FILE__, __LINE__, __func__); return; } - ioc->sas_hba.phy = kcalloc(num_phys, + + ioc->sas_hba.nr_phys_allocated = max_t(u8, + MPT_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated, sizeof(struct _sas_phy), GFP_KERNEL); if (!ioc->sas_hba.phy) { ioc_err(ioc, "failure at %s:%d/%s()!\n", diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index 9240e788b011..a04693498dc0 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -865,7 +865,7 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) "APP request entry - portid=%06x.\n", tdid.b24);
/* Ran out of space */ - if (pcnt > app_req.num_ports) + if (pcnt >= app_req.num_ports) break;
if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index ead65cdfb522..1b1a63a46781 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -4649,6 +4649,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip, struct sdeb_zone_state *zsp) { enum sdebug_z_cond zc; + struct sdeb_store_info *sip = devip2sip(devip, false);
if (zbc_zone_is_conv(zsp)) return; @@ -4660,6 +4661,10 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip, if (zsp->z_cond == ZC4_CLOSED) devip->nr_closed--;
+ if (zsp->z_wp > zsp->z_start) + memset(sip->storep + zsp->z_start * sdebug_sector_size, 0, + (zsp->z_wp - zsp->z_start) * sdebug_sector_size); + zsp->z_non_seq_resource = false; zsp->z_wp = zsp->z_start; zsp->z_cond = ZC1_EMPTY; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 9527e734a999..920aae661c5b 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -817,7 +817,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex); if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { - ret = count; + ret = 0; } else { ret = scsi_device_set_state(sdev, state); if (ret == 0 && state == SDEV_RUNNING) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index fce63335084e..78ead3369779 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2607,6 +2607,13 @@ sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage, unsigned char *buffer, int len, struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { + /* + * If we must use MODE SENSE(10), make sure that the buffer length + * is at least 8 bytes so that the mode sense header fits. + */ + if (sdkp->device->use_10_for_ms && len < 8) + len = 8; + return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len, SD_TIMEOUT, sdkp->max_retries, data, sshdr); diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c index cf263a58a148..6fd549a424d5 100644 --- a/drivers/staging/fbtft/fb_ssd1351.c +++ b/drivers/staging/fbtft/fb_ssd1351.c @@ -187,7 +187,6 @@ static struct fbtft_display display = { }, };
-#ifdef CONFIG_FB_BACKLIGHT static int update_onboard_backlight(struct backlight_device *bd) { struct fbtft_par *par = bl_get_data(bd); @@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par) if (!par->fbtftops.unregister_backlight) par->fbtftops.unregister_backlight = fbtft_unregister_backlight; } -#else -static void register_onboard_backlight(struct fbtft_par *par) { }; -#endif
FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index ed992ca605eb..1690358b8f01 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -128,7 +128,6 @@ static int fbtft_request_gpios(struct fbtft_par *par) return 0; }
-#ifdef CONFIG_FB_BACKLIGHT static int fbtft_backlight_update_status(struct backlight_device *bd) { struct fbtft_par *par = bl_get_data(bd); @@ -161,6 +160,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par) par->info->bl_dev = NULL; } } +EXPORT_SYMBOL(fbtft_unregister_backlight);
static const struct backlight_ops fbtft_bl_ops = { .get_brightness = fbtft_backlight_get_brightness, @@ -198,12 +198,7 @@ void fbtft_register_backlight(struct fbtft_par *par) if (!par->fbtftops.unregister_backlight) par->fbtftops.unregister_backlight = fbtft_unregister_backlight; } -#else -void fbtft_register_backlight(struct fbtft_par *par) { }; -void fbtft_unregister_backlight(struct fbtft_par *par) { }; -#endif EXPORT_SYMBOL(fbtft_register_backlight); -EXPORT_SYMBOL(fbtft_unregister_backlight);
static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) @@ -853,13 +848,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info) fb_info->fix.smem_len >> 10, text1, HZ / fb_info->fbdefio->delay, text2);
-#ifdef CONFIG_FB_BACKLIGHT /* Turn on backlight if available */ if (fb_info->bl_dev) { fb_info->bl_dev->props.power = FB_BLANK_UNBLANK; fb_info->bl_dev->ops->update_status(fb_info->bl_dev); } -#endif
return 0;
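With the CONFIG_FB_BACKLIGHT guards gone, the backlight hooks above are always built, and the device registered by a driver is switched on unconditionally at framebuffer registration. For orientation, a minimal sketch of the registration shape such drivers use; the demo_* names are hypothetical and error handling is trimmed:

	#include <linux/backlight.h>
	#include <linux/err.h>

	static int demo_update_status(struct backlight_device *bd)
	{
		/* push bd->props.brightness / power state to the panel */
		return 0;
	}

	static const struct backlight_ops demo_bl_ops = {
		.update_status = demo_update_status,
	};

	static int demo_register_backlight(struct device *dev, void *drvdata)
	{
		struct backlight_properties props = {
			.type = BACKLIGHT_RAW,
			.max_brightness = 1,	/* on/off only, like fbtft */
		};
		struct backlight_device *bd;

		bd = devm_backlight_device_register(dev, "demo-bl", dev,
						    drvdata, &demo_bl_ops,
						    &props);
		return PTR_ERR_OR_ZERO(bd);
	}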
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c index 1ed4772d2771..843760675876 100644 --- a/drivers/staging/greybus/audio_helper.c +++ b/drivers/staging/greybus/audio_helper.c @@ -192,7 +192,11 @@ int gbaudio_remove_component_controls(struct snd_soc_component *component, unsigned int num_controls) { struct snd_card *card = component->card->snd_card; + int err;
- return gbaudio_remove_controls(card, component->dev, controls, - num_controls, component->name_prefix); + down_write(&card->controls_rwsem); + err = gbaudio_remove_controls(card, component->dev, controls, + num_controls, component->name_prefix); + up_write(&card->controls_rwsem); + return err; } diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c index 5a472a4954b0..63d312d01171 100644 --- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c @@ -104,6 +104,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = { {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */ {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */ {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */ + {0x00}, /* 0x13 */ {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */ {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */ {0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */ @@ -115,6 +116,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = { {0x00}, /* 0x1C, */ {0x00}, /* 0x1D, */ {0x00}, /* 0x1E, */ + {0x00}, /* 0x1F, */ /* 0x20 ~ 0x7F , New Define ===== */ {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */ {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */ @@ -7080,12 +7082,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC); if (!pcmd_obj) return;
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header)); - pevtcmd = kzalloc(cmdsz, GFP_KERNEL); + pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); if (!pevtcmd) { kfree(pcmd_obj); return; diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c index 013c322b98a2..0eccce57c63a 100644 --- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c @@ -2061,6 +2061,7 @@ static int rtw_wx_read32(struct net_device *dev, u32 data32; u32 bytes; u8 *ptmp; + int ret;
padapter = (struct adapter *)rtw_netdev_priv(dev); p = &wrqu->data; @@ -2093,12 +2094,17 @@ static int rtw_wx_read32(struct net_device *dev, break; default: DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto err_free_ptmp; } DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
kfree(ptmp); return 0; + +err_free_ptmp: + kfree(ptmp); + return ret; }
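The fix above replaces an early return that leaked ptmp with the usual goto-unwind idiom, so every failure path funnels through one kfree(). The generic shape as a hedged sketch, names hypothetical:

	#include <linux/slab.h>

	static int demo_parse(size_t len, size_t max_len)
	{
		u8 *buf;
		int ret;

		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;	/* nothing to unwind yet */

		if (len > max_len) {	/* hypothetical validity check */
			ret = -EINVAL;
			goto err_free;	/* all later failures unwind here */
		}

		kfree(buf);
		return 0;

	err_free:
		kfree(buf);
		return ret;
	}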
static int rtw_wx_write32(struct net_device *dev, diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c index e3ee9dc7ab90..b0d1e20edc4c 100644 --- a/drivers/staging/r8188eu/os_dep/mlme_linux.c +++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c @@ -114,7 +114,7 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
buff = NULL; if (authmode == _WPA_IE_ID_) { - buff = kzalloc(IW_CUSTOM_MAX, GFP_KERNEL); + buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC); if (!buff) return; p = buff; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index a7dd1578b2c6..616ab3c8fde4 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -2549,13 +2549,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev) free_irq(dev->irq, dev); priv->irq = 0; } - free_rtllib(dev);
if (dev->mem_start != 0) { iounmap((void __iomem *)dev->mem_start); release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); } + + free_rtllib(dev); }
pci_disable_device(pdev); diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index f1d100671ee6..097142ffb184 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -420,15 +420,15 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0); if (IS_ERR(data->phy)) { ret = PTR_ERR(data->phy); - if (ret == -ENODEV) { - data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0); - if (IS_ERR(data->phy)) { - ret = PTR_ERR(data->phy); - if (ret == -ENODEV) - data->phy = NULL; - else - goto err_clk; - } + if (ret != -ENODEV) + goto err_clk; + data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0); + if (IS_ERR(data->phy)) { + ret = PTR_ERR(data->phy); + if (ret == -ENODEV) + data->phy = NULL; + else + goto err_clk; } }
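The restructured lookup above is the "optional resource with fallback" idiom: -ENODEV only means the phandle is absent from the device tree, which is allowed, while any other error must abort probe. Distilled into a hedged helper built on devm_usb_get_phy_by_phandle() from linux/usb/phy.h; the helper name is hypothetical:

	#include <linux/usb/phy.h>

	/* Try the legacy "fsl,usbphy" phandle first, then the generic
	 * "phys" one; absence (-ENODEV) of both means "run without a PHY". */
	static struct usb_phy *demo_get_phy(struct device *dev, int *err)
	{
		struct usb_phy *phy;

		*err = 0;
		phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
		if (!IS_ERR(phy))
			return phy;
		if (PTR_ERR(phy) != -ENODEV) {
			*err = PTR_ERR(phy);	/* hard failure */
			return NULL;
		}

		phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
		if (!IS_ERR(phy))
			return phy;
		if (PTR_ERR(phy) != -ENODEV)
			*err = PTR_ERR(phy);	/* hard failure */
		return NULL;			/* absent twice: no PHY */
	}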
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 86658a81d284..00070a8a6507 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4700,8 +4700,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, if (oldspeed == USB_SPEED_LOW) delay = HUB_LONG_RESET_TIME;
- mutex_lock(hcd->address0_mutex); - /* Reset the device; full speed may morph to high speed */ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ retval = hub_port_reset(hub, port1, udev, delay, false); @@ -5016,7 +5014,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } - mutex_unlock(hcd->address0_mutex); return retval; }
@@ -5191,6 +5188,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; static int unreliable_port = -1; + bool retry_locked;
/* Disconnect any existing devices under this port */ if (udev) { @@ -5246,8 +5244,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, unit_load = 100;
status = 0; - for (i = 0; i < PORT_INIT_TRIES; i++) {
+ for (i = 0; i < PORT_INIT_TRIES; i++) { + usb_lock_port(port_dev); + mutex_lock(hcd->address0_mutex); + retry_locked = true; /* reallocate for each attempt, since references * to the previous one can escape in various ways */ @@ -5255,6 +5256,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, if (!udev) { dev_err(&port_dev->dev, "couldn't allocate usb_device\n"); + mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); goto done; }
@@ -5276,12 +5279,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, }
/* reset (non-USB 3.0 devices) and get descriptor */ - usb_lock_port(port_dev); status = hub_port_init(hub, udev, port1, i); - usb_unlock_port(port_dev); if (status < 0) goto loop;
+ mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); + retry_locked = false; + if (udev->quirks & USB_QUIRK_DELAY_INIT) msleep(2000);
@@ -5374,6 +5379,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, usb_ep0_reinit(udev); release_devnum(udev); hub_free_dev(udev); + if (retry_locked) { + mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); + } usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; @@ -5915,6 +5924,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev) bos = udev->bos; udev->bos = NULL;
+ mutex_lock(hcd->address0_mutex); + for (i = 0; i < PORT_INIT_TRIES; ++i) {
/* ep0 maxpacket size may change; let the HCD know about it. @@ -5924,6 +5935,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) break; } + mutex_unlock(hcd->address0_mutex);
if (ret < 0) goto re_enumerate; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 11d85a6e0b0d..2190225bf3da 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, } ctrl |= DXEPCTL_CNAK; } else { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); return; } @@ -2857,9 +2859,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
do { hs_req = get_ep_head(hs_ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); + } dwc2_gadget_incr_frame_num(hs_ep); /* Update current frame number value. */ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); @@ -2912,8 +2917,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
while (dwc2_gadget_target_frame_elapsed(ep)) { hs_req = get_ep_head(ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA); + }
dwc2_gadget_incr_frame_num(ep); /* Update current frame number value. */ @@ -3002,8 +3010,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
while (dwc2_gadget_target_frame_elapsed(hs_ep)) { hs_req = get_ep_head(hs_ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); + }
dwc2_gadget_incr_frame_num(hs_ep); /* Update current frame number value. */ diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 89a788326c56..24beff610cf2 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -59,7 +59,7 @@ #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
/* If we get a NAK, wait this long before retrying */ -#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L) +#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
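The old constant only looked integral: in C, 1E6L is a floating constant (the E introduces an exponent) and the L suffix makes it a long double, so DWC2_RETRY_WAIT_DELAY silently pulled floating-point typing into nanosecond arithmetic; NSEC_PER_MSEC is a plain integer. A stand-alone illustration:

	#include <stdio.h>

	int main(void)
	{
		/* 1E6L is a long double literal, not a long: */
		printf("sizeof(1 * 1E6L)     = %zu\n", sizeof(1 * 1E6L));
		/* an integer constant keeps the expression integral: */
		printf("sizeof(1 * 1000000L) = %zu\n", sizeof(1 * 1000000L));
		return 0;
	}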
/** * dwc2_periodic_channel_available() - Checks that a channel is available for a diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 0104a80b185e..357b7805896e 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1565,9 +1565,11 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); - if (ret) - return ret; + if (!dwc->sysdev_is_parent) { + ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + }
dwc->reset = devm_reset_control_array_get_optional_shared(dev); if (IS_ERR(dwc->reset)) diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 0c100901a784..fd5d42ec5350 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -143,7 +143,7 @@ #define DWC3_GHWPARAMS8 0xc600 #define DWC3_GUCTL3 0xc60c #define DWC3_GFLADJ 0xc630 -#define DWC3_GHWPARAMS9 0xc680 +#define DWC3_GHWPARAMS9 0xc6e0
/* Device Registers */ #define DWC3_DCFG 0xc700 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index ed97e47d3261..4c16805a2b31 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -310,13 +310,24 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { int link_state;
+ /* + * Initiate remote wakeup if the link state is in U3 when + * operating in SS/SSP or L1/L2 when operating in HS/FS. If the + * link state is in U1/U2, no remote wakeup is needed. The Start + * Transfer command will initiate the link recovery. + */ link_state = dwc3_gadget_get_link_state(dwc); - if (link_state == DWC3_LINK_STATE_U1 || - link_state == DWC3_LINK_STATE_U2 || - link_state == DWC3_LINK_STATE_U3) { + switch (link_state) { + case DWC3_LINK_STATE_U2: + if (dwc->gadget->speed >= USB_SPEED_SUPER) + break; + + fallthrough; + case DWC3_LINK_STATE_U3: ret = __dwc3_gadget_wakeup(dwc); dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", ret); + break; } }
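The switch above reduces to a predicate on (link state, speed): U3 always needs an explicit wakeup, and U2 only when not operating at SuperSpeed, because in HS/FS the controller reports L1/L2 under the U2/U3 names. A hedged restatement; the helper name is hypothetical, the enums are the driver's own (drivers/usb/dwc3/core.h and linux/usb/ch9.h):

	/* Hypothetical distillation of the switch above. */
	static bool demo_needs_remote_wakeup(enum dwc3_link_state state,
					     enum usb_device_speed speed)
	{
		switch (state) {
		case DWC3_LINK_STATE_U2:
			/* In HS/FS this really is L1 and needs a wakeup; a
			 * true SuperSpeed U1/U2 recovers on Start Transfer. */
			return speed < USB_SPEED_SUPER;
		case DWC3_LINK_STATE_U3:
			return true;	/* U3 (or L2 in HS/FS): always wake */
		default:
			return false;
		}
	}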
@@ -3252,6 +3263,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep, struct dwc3 *dwc = dep->dwc; bool no_started_trb = true;
+ if (!dep->endpoint.desc) + return no_started_trb; + dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) @@ -3299,6 +3313,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, { int status = 0;
+ if (!dep->endpoint.desc) + return; + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) dwc3_gadget_endpoint_frame_from_event(dep, event);
@@ -3352,6 +3369,14 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep, if (cmd != DWC3_DEPCMD_ENDTRANSFER) return;
+ /* + * The END_TRANSFER command will cause the controller to generate a + * NoStream Event, and it's not due to the host DP NoStream rejection. + * Ignore the next NoStream event. + */ + if (dep->stream_capable) + dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM; + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; dep->flags &= ~DWC3_EP_TRANSFER_STARTED; dwc3_gadget_ep_cleanup_cancelled_requests(dep); @@ -3574,14 +3599,6 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, WARN_ON_ONCE(ret); dep->resource_index = 0;
- /* - * The END_TRANSFER command will cause the controller to generate a - * NoStream Event, and it's not due to the host DP NoStream rejection. - * Ignore the next NoStream event. - */ - if (dep->stream_capable) - dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM; - if (!interrupt) dep->flags &= ~DWC3_EP_TRANSFER_STARTED; else diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 1bf494b649bd..c8af2cd2216d 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1400,6 +1400,7 @@ static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra)
static int tegra_xusb_probe(struct platform_device *pdev) { + struct of_phandle_args args; struct tegra_xusb *tegra; struct device_node *np; struct resource *regs; @@ -1454,10 +1455,17 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto put_padctl; }
-	tegra->padctl_irq = of_irq_get(np, 0);
-	if (tegra->padctl_irq <= 0) {
-		err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
-		goto put_padctl;
+	/* Older device trees don't have the padctl interrupt */
+	err = of_irq_parse_one(np, 0, &args);
+	if (!err) {
+		tegra->padctl_irq = of_irq_get(np, 0);
+		if (tegra->padctl_irq <= 0) {
+			err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
+			goto put_padctl;
+		}
+	} else {
+		dev_dbg(&pdev->dev,
+			"%pOF is missing an interrupt, disabling PM support\n", np);
 	}
tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host"); @@ -1696,11 +1704,15 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto remove_usb3; }
- err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, NULL, tegra_xusb_padctl_irq, - IRQF_ONESHOT, dev_name(&pdev->dev), tegra); - if (err < 0) { - dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err); - goto remove_usb3; + if (tegra->padctl_irq) { + err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, + NULL, tegra_xusb_padctl_irq, + IRQF_ONESHOT, dev_name(&pdev->dev), + tegra); + if (err < 0) { + dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err); + goto remove_usb3; + } }
err = tegra_xusb_enable_firmware_messages(tegra); @@ -1718,13 +1730,16 @@ static int tegra_xusb_probe(struct platform_device *pdev) /* Enable wake for both USB 2.0 and USB 3.0 roothubs */ device_init_wakeup(&tegra->hcd->self.root_hub->dev, true); device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true); - device_init_wakeup(tegra->dev, true);
pm_runtime_use_autosuspend(tegra->dev); pm_runtime_set_autosuspend_delay(tegra->dev, 2000); pm_runtime_mark_last_busy(tegra->dev); pm_runtime_set_active(tegra->dev); - pm_runtime_enable(tegra->dev); + + if (tegra->padctl_irq) { + device_init_wakeup(tegra->dev, true); + pm_runtime_enable(tegra->dev); + }
return 0;
@@ -1772,7 +1787,9 @@ static int tegra_xusb_remove(struct platform_device *pdev) dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt, tegra->fw.phys);
- pm_runtime_disable(&pdev->dev); + if (tegra->padctl_irq) + pm_runtime_disable(&pdev->dev); + pm_runtime_put(&pdev->dev);
tegra_xusb_powergate_partitions(tegra); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index a484ff5e4ebf..546fce4617a8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */ + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index f45ca7ddf78e..a70fd86f735c 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -432,6 +432,7 @@ static int pl2303_detect_type(struct usb_serial *serial) case 0x200: switch (bcdDevice) { case 0x100: + case 0x105: case 0x305: case 0x405: /* diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index 7a2a17866a82..72f9001b0792 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc) ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK, FUSB_REG_MASK_BC_LVL | FUSB_REG_MASK_COMP_CHNG, - FUSB_REG_MASK_COMP_CHNG); + FUSB_REG_MASK_BC_LVL); if (ret < 0) { fusb302_log(chip, "cannot set SRC interrupt, ret=%d", ret); goto done; } chip->intr_comp_chng = true; + chip->intr_bc_lvl = false; break; case TYPEC_CC_RD: ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK, FUSB_REG_MASK_BC_LVL | FUSB_REG_MASK_COMP_CHNG, - FUSB_REG_MASK_BC_LVL); + FUSB_REG_MASK_COMP_CHNG); if (ret < 0) { fusb302_log(chip, "cannot set SRC interrupt, ret=%d", ret); goto done; } chip->intr_bc_lvl = true; + chip->intr_comp_chng = false; break; default: break; diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 5f484fff8dbe..41b0cd17fcba 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -591,8 +591,11 @@ static void vdpasim_free(struct vdpa_device *vdpa) vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); }
- put_iova_domain(&vdpasim->iova); - iova_cache_put(); + if (vdpa_get_dma_dev(vdpa)) { + put_iova_domain(&vdpasim->iova); + iova_cache_put(); + } + kvfree(vdpasim->buffer); if (vdpasim->iommu) vhost_iotlb_free(vdpasim->iommu); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 938aefbc75ec..4e3b95af7ee4 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -554,7 +554,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) virtio_transport_free_pkt(pkt);
len += sizeof(pkt->hdr); - vhost_add_used(vq, head, len); + vhost_add_used(vq, head, 0); total_len += len; added = true; } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index bd003ca8acbe..fe360c33ce71 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -909,7 +909,7 @@ static struct notifier_block xenbus_resume_nb = {
static int __init xenbus_init(void) { - int err = 0; + int err; uint64_t v = 0; xen_store_domain_type = XS_UNKNOWN;
@@ -949,6 +949,29 @@ static int __init xenbus_init(void) err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; + /* + * Uninitialized hvm_params are zero and return no error. + * Although it is theoretically possible to have + * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is + * not zero when valid. If zero, it means that Xenstore hasn't + * been properly initialized. Instead of attempting to map a + * wrong guest physical address return error. + * + * Also recognize all bits set as an invalid value. + */ + if (!v || !~v) { + err = -ENOENT; + goto out_error; + } + /* Avoid truncation on 32-bit. */ +#if BITS_PER_LONG == 32 + if (v > ULONG_MAX) { + pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n", + __func__, v); + err = -EINVAL; + goto out_error; + } +#endif xen_store_gfn = (unsigned long)v; xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT, @@ -983,8 +1006,10 @@ static int __init xenbus_init(void) */ proc_create_mount_point("xen"); #endif + return 0;
out_error: + xen_store_domain_type = XS_UNKNOWN; return err; }
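The sentinel tests above rely on a compact idiom: !v rejects the never-initialized zero, and !~v rejects an all-ones value, since only ~0 has a complement of zero. Condensed into a hedged helper (name hypothetical, kernel types assumed):

	/* Hypothetical helper condensing the validity checks above. */
	static bool demo_store_pfn_valid(u64 v)
	{
		if (!v)		/* param never set: reads back as 0 */
			return false;
		if (!~v)	/* all bits set: error/poison pattern */
			return false;
	#if BITS_PER_LONG == 32
		if (v > ULONG_MAX)	/* would truncate in unsigned long */
			return false;
	#endif
		return true;
	}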
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index fd8742bae847..202ddde3d62a 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -52,8 +52,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry)); struct ceph_mon_client *monc = &fsc->client->monc; struct ceph_statfs st; - u64 fsid; - int err; + int i, err; u64 data_pool;
if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) { @@ -99,12 +98,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_namelen = NAME_MAX;
/* Must convert the fsid, for consistent values across arches */ + buf->f_fsid.val[0] = 0; mutex_lock(&monc->mutex); - fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^ - le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1)); + for (i = 0 ; i < sizeof(monc->monmap->fsid) / sizeof(__le32) ; ++i) + buf->f_fsid.val[0] ^= le32_to_cpu(((__le32 *)&monc->monmap->fsid)[i]); mutex_unlock(&monc->mutex);
- buf->f_fsid = u64_to_fsid(fsid); + /* fold the fs_cluster_id into the upper bits */ + buf->f_fsid.val[1] = monc->fs_cluster_id;
return 0; } diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index de2c12bcfa4b..905a901f7f80 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -358,6 +358,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) seq_printf(m, " signed"); if (server->posix_ext_supported) seq_printf(m, " posix"); + if (server->nosharesock) + seq_printf(m, " nosharesock");
if (server->rdma) seq_printf(m, "\nRDMA "); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index dea4c929d3f4..3e5b8e177cfa 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -592,6 +592,7 @@ struct TCP_Server_Info { struct list_head pending_mid_q; bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ + bool nosharesock; bool tcp_nodelay; unsigned int credits; /* send no more requests at once */ unsigned int max_credits; /* can override large 32000 default at mnt */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index e757ee52cc77..439f02f1886c 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1220,6 +1220,10 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context * if (ctx->nosharesock) return 0;
+ /* this server does not share socket */ + if (server->nosharesock) + return 0; + /* If multidialect negotiation see if existing sessions match one */ if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { if (server->vals->protocol_id < SMB30_PROT_ID) @@ -1370,6 +1374,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx) goto out_err; }
+ if (ctx->nosharesock) + tcp_ses->nosharesock = true; + tcp_ses->ops = ctx->ops; tcp_ses->vals = ctx->vals; cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index bd86067a63f7..3ca703cd5b24 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -141,7 +141,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, * however in order to avoid some race conditions, add a * DBG_BUGON to observe this in advance. */ - DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp); + DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
/* last refcount should be connected with its managed pslot. */ erofs_workgroup_unfreeze(grp, 0); @@ -156,15 +156,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, unsigned int freed = 0; unsigned long index;
+ xa_lock(&sbi->managed_pslots); xa_for_each(&sbi->managed_pslots, index, grp) { /* try to shrink each valid workgroup */ if (!erofs_try_to_release_workgroup(sbi, grp)) continue; + xa_unlock(&sbi->managed_pslots);
++freed; if (!--nr_shrink) - break; + return freed; + xa_lock(&sbi->managed_pslots); } + xa_unlock(&sbi->managed_pslots); return freed; }
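The shrinker above now scans managed_pslots with xa_lock held, which is why the erase inside erofs_try_to_release_workgroup() became the already-locked __xa_erase(); the lock is dropped only once an entry has actually been released. The generic drop-and-retake shape, sketched with hypothetical helpers:

	#include <linux/xarray.h>

	static bool demo_try_release(void *entry);	/* hypothetical: erases entry under the lock */

	/* Scan under the xarray lock, dropping it around the heavy work.
	 * The index-based xa_for_each() iteration stays valid across the
	 * unlock/lock window even if neighbouring entries disappear. */
	static unsigned long demo_shrink(struct xarray *xa, unsigned long nr)
	{
		unsigned long index, freed = 0;
		void *entry;

		xa_lock(xa);
		xa_for_each(xa, index, entry) {
			if (!demo_try_release(entry))
				continue;
			xa_unlock(xa);

			++freed;
			if (!--nr)
				return freed;	/* lock already dropped */
			xa_lock(xa);
		}
		xa_unlock(xa);
		return freed;
	}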
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 83e9bc0f91ff..7b0282724231 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1162,7 +1162,8 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi) if (!is_journalled_quota(sbi)) return false;
- down_write(&sbi->quota_sem); + if (!down_write_trylock(&sbi->quota_sem)) + return true; if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) { ret = false; } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) { diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index e863136081b4..556fcd8457f3 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1443,6 +1443,7 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, nid, nid_of_node(page), ino_of_node(page), ofs_of_node(page), cpver_of_node(page), next_blkaddr_of_node(page)); + set_sbi_flag(sbi, SBI_NEED_FSCK); err = -EINVAL; out_err: ClearPageUptodate(page); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 5a1f142bdb48..a9d21b33da9c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -847,17 +847,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
replace_page_cache_page(oldpage, newpage);
+ get_page(newpage); + + if (!(buf->flags & PIPE_BUF_FLAG_LRU)) + lru_cache_add(newpage); + /* * Release while we have extra ref on stolen page. Otherwise * anon_pipe_buf_release() might think the page can be reused. */ pipe_buf_release(cs->pipe, buf);
- get_page(newpage); - - if (!(buf->flags & PIPE_BUF_FLAG_LRU)) - lru_cache_add(newpage); - err = 0; spin_lock(&cs->req->waitq.lock); if (test_bit(FR_ABORTED, &cs->req->flags)) diff --git a/fs/io_uring.c b/fs/io_uring.c index 365f8b350b7f..f8ceddafb6fc 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1204,6 +1204,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
static bool io_match_task(struct io_kiocb *head, struct task_struct *task, bool cancel_all) + __must_hold(&req->ctx->timeout_lock) { struct io_kiocb *req;
@@ -1219,6 +1220,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task, return false; }
+static bool io_match_linked(struct io_kiocb *head) +{ + struct io_kiocb *req; + + io_for_each_link(req, head) { + if (req->flags & REQ_F_INFLIGHT) + return true; + } + return false; +} + +/* + * As io_match_task() but protected against racing with linked timeouts. + * User must not hold timeout_lock. + */ +static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task, + bool cancel_all) +{ + bool matched; + + if (task && head->task != task) + return false; + if (cancel_all) + return true; + + if (head->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = head->ctx; + + /* protect against races with linked timeouts */ + spin_lock_irq(&ctx->timeout_lock); + matched = io_match_linked(head); + spin_unlock_irq(&ctx->timeout_lock); + } else { + matched = io_match_linked(head); + } + return matched; +} + static inline void req_set_fail(struct io_kiocb *req) { req->flags |= REQ_F_FAIL; @@ -1430,10 +1469,10 @@ static void io_prep_async_link(struct io_kiocb *req) if (req->flags & REQ_F_LINK_TIMEOUT) { struct io_ring_ctx *ctx = req->ctx;
- spin_lock(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); io_for_each_link(cur, req) io_prep_async_work(cur); - spin_unlock(&ctx->completion_lock); + spin_unlock_irq(&ctx->timeout_lock); } else { io_for_each_link(cur, req) io_prep_async_work(cur); @@ -4304,6 +4343,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, kfree(nxt); if (++i == nbufs) return i; + cond_resched(); } i++; kfree(buf); @@ -5702,7 +5742,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
list = &ctx->cancel_hash[i]; hlist_for_each_entry_safe(req, tmp, list, hash_node) { - if (io_match_task(req, tsk, cancel_all)) + if (io_match_task_safe(req, tsk, cancel_all)) posted += io_poll_remove_one(req); } } @@ -6884,10 +6924,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx, static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) { struct io_kiocb *prev = req->timeout.prev; - int ret; + int ret = -ENOENT;
if (prev) { - ret = io_try_cancel_userdata(req, prev->user_data); + if (!(req->task->flags & PF_EXITING)) + ret = io_try_cancel_userdata(req, prev->user_data); io_req_complete_post(req, ret ?: -ETIME, 0); io_put_req(prev); } else { @@ -9209,10 +9250,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) struct io_buffer *buf; unsigned long index;
- xa_for_each(&ctx->io_buffers, index, buf) { + xa_for_each(&ctx->io_buffers, index, buf) __io_remove_buffers(ctx, buf, index, -1U); - cond_resched(); - } }
static void io_req_cache_free(struct list_head *list) @@ -9517,19 +9556,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_task_cancel *cancel = data; - bool ret;
- if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) { - struct io_ring_ctx *ctx = req->ctx; - - /* protect against races with linked timeouts */ - spin_lock(&ctx->completion_lock); - ret = io_match_task(req, cancel->task, cancel->all); - spin_unlock(&ctx->completion_lock); - } else { - ret = io_match_task(req, cancel->task, cancel->all); - } - return ret; + return io_match_task_safe(req, cancel->task, cancel->all); }
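Note the lock switch in the hunks above: linked timeouts are manipulated under timeout_lock, which the hrtimer callback also takes, so process-context readers such as io_match_task_safe() must use the IRQ-disabling variant (the old code sampled the flags under completion_lock instead). The general shape, with hypothetical names:

	struct demo_ctx {
		spinlock_t timeout_lock;
		/* ... */
	};

	static bool demo_scan_links(struct demo_ctx *c);	/* hypothetical */

	/* A lock that an interrupt handler also takes must be acquired
	 * with IRQs disabled in process context, or the handler can
	 * deadlock against us on the same CPU. */
	static bool demo_match(struct demo_ctx *c)
	{
		bool matched;

		spin_lock_irq(&c->timeout_lock);	/* IRQ-safe variant */
		matched = demo_scan_links(c);
		spin_unlock_irq(&c->timeout_lock);

		return matched;
	}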
static bool io_cancel_defer_files(struct io_ring_ctx *ctx, @@ -9540,7 +9568,7 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
spin_lock(&ctx->completion_lock); list_for_each_entry_reverse(de, &ctx->defer_list, list) { - if (io_match_task(de->req, task, cancel_all)) { + if (io_match_task_safe(de->req, task, cancel_all)) { list_cut_position(&list, &ctx->defer_list, &de->list); break; } diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 9cc5798423d1..97119ec3b850 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -256,8 +256,13 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, unsigned poff, plen; sector_t sector;
- if (iomap->type == IOMAP_INLINE) - return min(iomap_read_inline_data(iter, page), length); + if (iomap->type == IOMAP_INLINE) { + loff_t ret = iomap_read_inline_data(iter, page); + + if (ret < 0) + return ret; + return 0; + }
/* zero post-eof blocks as the page may be mapped */ iop = iomap_page_create(iter->inode, page); @@ -370,6 +375,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter, ctx->cur_page_in_bio = false; } ret = iomap_readpage_iter(iter, ctx, done); + if (ret <= 0) + return ret; }
return done; diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c index 589694af4e95..ad0ea5d36f2e 100644 --- a/fs/ksmbd/smb2pdu.c +++ b/fs/ksmbd/smb2pdu.c @@ -1700,8 +1700,10 @@ int smb2_sess_setup(struct ksmbd_work *work) negblob_off = le16_to_cpu(req->SecurityBufferOffset); negblob_len = le16_to_cpu(req->SecurityBufferLength); if (negblob_off < (offsetof(struct smb2_sess_setup_req, Buffer) - 4) || - negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) - return -EINVAL; + negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) { + rc = -EINVAL; + goto out_err; + }
negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId + negblob_off); @@ -4450,6 +4452,12 @@ static void get_file_stream_info(struct ksmbd_work *work, &stat); file_info = (struct smb2_file_stream_info *)rsp->Buffer;
+ buf_free_len = + smb2_calc_max_out_buf_len(work, 8, + le32_to_cpu(req->OutputBufferLength)); + if (buf_free_len < 0) + goto out; + xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list); if (xattr_list_len < 0) { goto out; @@ -4458,12 +4466,6 @@ static void get_file_stream_info(struct ksmbd_work *work, goto out; }
- buf_free_len = - smb2_calc_max_out_buf_len(work, 8, - le32_to_cpu(req->OutputBufferLength)); - if (buf_free_len < 0) - goto out; - while (idx < xattr_list_len) { stream_name = xattr_list + idx; streamlen = strlen(stream_name); @@ -4489,8 +4491,10 @@ static void get_file_stream_info(struct ksmbd_work *work, ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
next = sizeof(struct smb2_file_stream_info) + streamlen * 2; - if (next > buf_free_len) + if (next > buf_free_len) { + kfree(stream_buf); break; + }
file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes]; streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName, @@ -4507,6 +4511,7 @@ static void get_file_stream_info(struct ksmbd_work *work, file_info->NextEntryOffset = cpu_to_le32(next); }
+out: if (!S_ISDIR(stat.mode) && buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) { file_info = (struct smb2_file_stream_info *) @@ -4515,14 +4520,13 @@ static void get_file_stream_info(struct ksmbd_work *work, "::$DATA", 7, conn->local_nls, 0); streamlen *= 2; file_info->StreamNameLength = cpu_to_le32(streamlen); - file_info->StreamSize = 0; - file_info->StreamAllocationSize = 0; + file_info->StreamSize = cpu_to_le64(stat.size); + file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9); nbytes += sizeof(struct smb2_file_stream_info) + streamlen; }
/* last entry offset should be 0 */ file_info->NextEntryOffset = 0; -out: kvfree(xattr_list);
rsp->OutputBufferLength = cpu_to_le32(nbytes); @@ -5060,7 +5064,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work, if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | PROTECTED_DACL_SECINFO | UNPROTECTED_DACL_SECINFO)) { - pr_err("Unsupported addition info: 0x%x)\n", + ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n", addition_info);
pntsd->revision = cpu_to_le16(1); diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index a24349512ffe..9865b5c37d88 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -285,7 +285,9 @@ static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len) loff_t newsize = pos + len; loff_t end = newsize - 1;
- truncate_pagecache_range(inode, pos, end); + WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping, + pos >> PAGE_SHIFT, end >> PAGE_SHIFT)); + spin_lock(&inode->i_lock); if (newsize > i_size_read(inode)) i_size_write(inode, newsize); diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index c8bad735e4c1..271e5f92ed01 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -1434,8 +1434,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp, status = decode_clone(xdr); if (status) goto out; - status = decode_getfattr(xdr, res->dst_fattr, res->server); - + decode_getfattr(xdr, res->dst_fattr, res->server); out: res->rpc_status = status; return status; diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 9a15334da208..e5730986758f 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -124,9 +124,13 @@ ssize_t read_from_oldmem(char *buf, size_t count, nr_bytes = count;
/* If pfn is not ram, return zeros for sparse dump files */ - if (pfn_is_ram(pfn) == 0) - memset(buf, 0, nr_bytes); - else { + if (pfn_is_ram(pfn) == 0) { + tmp = 0; + if (!userbuf) + memset(buf, 0, nr_bytes); + else if (clear_user(buf, nr_bytes)) + tmp = -EFAULT; + } else { if (encrypted) tmp = copy_oldmem_page_encrypted(pfn, buf, nr_bytes, @@ -135,10 +139,10 @@ ssize_t read_from_oldmem(char *buf, size_t count, else tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf); - - if (tmp < 0) - return tmp; } + if (tmp < 0) + return tmp; + *ppos += nr_bytes; count -= nr_bytes; buf += nr_bytes; diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index c412dde4d67d..83b8070d1cc9 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -485,6 +485,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void fib6_nh_release(struct fib6_nh *fib6_nh); +void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
int call_fib6_entry_notifiers(struct net *net, enum fib_event_type event_type, diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index afbce90c4480..45e0339be6fa 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -47,6 +47,7 @@ struct ipv6_stub { struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void (*fib6_nh_release)(struct fib6_nh *fib6_nh); + void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh); void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt); int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify); void (*fib6_rt_update)(struct net *net, struct fib6_info *rt, diff --git a/include/net/nl802154.h b/include/net/nl802154.h index ddcee128f5d9..145acb8f2509 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -19,6 +19,8 @@ * */
+#include <linux/types.h> + #define NL802154_GENL_NAME "nl802154"
enum nl802154_commands { @@ -150,10 +152,9 @@ enum nl802154_attrs { };
enum nl802154_iftype { - /* for backwards compatibility TODO */ - NL802154_IFTYPE_UNSPEC = -1, + NL802154_IFTYPE_UNSPEC = (~(__u32)0),
- NL802154_IFTYPE_NODE, + NL802154_IFTYPE_NODE = 0, NL802154_IFTYPE_MONITOR, NL802154_IFTYPE_COORD,
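The old sentinel -1 is a signed int, while nl802154 interface types travel as u32 netlink attributes; after one round-trip the value reads back as 0xffffffff and no longer lines up with signed comparisons. Spelling it (~(__u32)0) makes the on-wire value the definition. A small stand-alone illustration:

	#include <stdio.h>

	int main(void)
	{
		int old_unspec = -1;		/* old enum initializer */
		unsigned int on_wire = (unsigned int)old_unspec; /* u32 attr */

		printf("on wire: 0x%x\n", on_wire);	/* 0xffffffff */
		/* comparing against the unsigned form is exactly what the
		 * new definition (~(__u32)0) spells out directly: */
		printf("matches ~0u: %s\n", on_wire == ~0u ? "yes" : "no");
		return 0;
	}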
diff --git a/kernel/cpu.c b/kernel/cpu.c index 192e43a87407..407a2568f35e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -31,6 +31,7 @@ #include <linux/smpboot.h> #include <linux/relay.h> #include <linux/slab.h> +#include <linux/scs.h> #include <linux/percpu-rwsem.h> #include <linux/cpuset.h>
@@ -587,6 +588,12 @@ static int bringup_cpu(unsigned int cpu) struct task_struct *idle = idle_thread_get(cpu); int ret;
+ /* + * Reset stale stack state from the last time this CPU was online. + */ + scs_task_reset(idle); + kasan_unpoison_task_stack(idle); + /* * Some architectures have to walk the irq descriptors to * setup the vector space for the cpu which comes online. diff --git a/kernel/events/core.c b/kernel/events/core.c index 7162b600e7ea..2931faf92a76 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9729,6 +9729,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, continue; if (event->attr.config != entry->type) continue; + /* Cannot deliver synchronous signal to other task. */ + if (event->attr.sigtrap) + continue; if (perf_tp_event_match(event, &data, regs)) perf_swevent_event(event, count, &data, regs); } diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 29eea50a3e67..e63f740c2cc8 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -106,9 +106,9 @@ * atomic_long_cmpxchg() will be used to obtain writer lock. * * There are three places where the lock handoff bit may be set or cleared. - * 1) rwsem_mark_wake() for readers. - * 2) rwsem_try_write_lock() for writers. - * 3) Error path of rwsem_down_write_slowpath(). + * 1) rwsem_mark_wake() for readers -- set, clear + * 2) rwsem_try_write_lock() for writers -- set, clear + * 3) rwsem_del_waiter() -- clear * * For all the above cases, wait_lock will be held. A writer must also * be the first one in the wait_list to be eligible for setting the handoff @@ -335,6 +335,9 @@ struct rwsem_waiter { struct task_struct *task; enum rwsem_waiter_type type; unsigned long timeout; + + /* Writer only, not initialized in reader */ + bool handoff_set; }; #define rwsem_first_waiter(sem) \ list_first_entry(&sem->wait_list, struct rwsem_waiter, list) @@ -345,12 +348,6 @@ enum rwsem_wake_type { RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */ };
-enum writer_wait_state { - WRITER_NOT_FIRST, /* Writer is not first in wait list */ - WRITER_FIRST, /* Writer is first in wait list */ - WRITER_HANDOFF /* Writer is first & handoff needed */ -}; - /* * The typical HZ value is either 250 or 1000. So set the minimum waiting * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait @@ -366,6 +363,31 @@ enum writer_wait_state { */ #define MAX_READERS_WAKEUP 0x100
+static inline void +rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) +{ + lockdep_assert_held(&sem->wait_lock); + list_add_tail(&waiter->list, &sem->wait_list); + /* caller will set RWSEM_FLAG_WAITERS */ +} + +/* + * Remove a waiter from the wait_list and clear flags. + * + * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of + * this function. Modify with care. + */ +static inline void +rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) +{ + lockdep_assert_held(&sem->wait_lock); + list_del(&waiter->list); + if (likely(!list_empty(&sem->wait_list))) + return; + + atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count); +} + /* * handle the lock release when processes blocked on it that can now run * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must @@ -377,6 +399,8 @@ enum writer_wait_state { * preferably when the wait_lock is released * - woken process blocks are discarded from the list after having task zeroed * - writers are only marked woken if downgrading is false + * + * Implies rwsem_del_waiter() for all woken readers. */ static void rwsem_mark_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type, @@ -491,18 +515,25 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
adjustment = woken * RWSEM_READER_BIAS - adjustment; lockevent_cond_inc(rwsem_wake_reader, woken); + + oldcount = atomic_long_read(&sem->count); if (list_empty(&sem->wait_list)) { - /* hit end of list above */ + /* + * Combined with list_move_tail() above, this implies + * rwsem_del_waiter(). + */ adjustment -= RWSEM_FLAG_WAITERS; + if (oldcount & RWSEM_FLAG_HANDOFF) + adjustment -= RWSEM_FLAG_HANDOFF; + } else if (woken) { + /* + * When we've woken a reader, we no longer need to force + * writers to give up the lock and we can clear HANDOFF. + */ + if (oldcount & RWSEM_FLAG_HANDOFF) + adjustment -= RWSEM_FLAG_HANDOFF; }
- /* - * When we've woken a reader, we no longer need to force writers - * to give up the lock and we can clear HANDOFF. - */ - if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF)) - adjustment -= RWSEM_FLAG_HANDOFF; - if (adjustment) atomic_long_add(adjustment, &sem->count);
@@ -533,12 +564,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem, * race conditions between checking the rwsem wait list and setting the * sem->count accordingly. * - * If wstate is WRITER_HANDOFF, it will make sure that either the handoff - * bit is set or the lock is acquired with handoff bit cleared. + * Implies rwsem_del_waiter() on success. */ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, - enum writer_wait_state wstate) + struct rwsem_waiter *waiter) { + bool first = rwsem_first_waiter(sem) == waiter; long count, new;
lockdep_assert_held(&sem->wait_lock); @@ -547,13 +578,19 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, do { bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
- if (has_handoff && wstate == WRITER_NOT_FIRST) - return false; + if (has_handoff) { + if (!first) + return false; + + /* First waiter inherits a previously set handoff bit */ + waiter->handoff_set = true; + }
new = count;
if (count & RWSEM_LOCK_MASK) { - if (has_handoff || (wstate != WRITER_HANDOFF)) + if (has_handoff || (!rt_task(waiter->task) && + !time_after(jiffies, waiter->timeout))) return false;
new |= RWSEM_FLAG_HANDOFF; @@ -570,9 +607,17 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, * We have either acquired the lock with handoff bit cleared or * set the handoff bit. */ - if (new & RWSEM_FLAG_HANDOFF) + if (new & RWSEM_FLAG_HANDOFF) { + waiter->handoff_set = true; + lockevent_inc(rwsem_wlock_handoff); return false; + }
+ /* + * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on + * success. + */ + list_del(&waiter->list); rwsem_set_owner(sem); return true; } @@ -953,7 +998,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat } adjustment += RWSEM_FLAG_WAITERS; } - list_add_tail(&waiter.list, &sem->wait_list); + rwsem_add_waiter(sem, &waiter);
/* we're now waiting on the lock, but no longer actively locking */ count = atomic_long_add_return(adjustment, &sem->count); @@ -999,11 +1044,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat return sem;
out_nolock: - list_del(&waiter.list); - if (list_empty(&sem->wait_list)) { - atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF, - &sem->count); - } + rwsem_del_waiter(sem, &waiter); raw_spin_unlock_irq(&sem->wait_lock); __set_current_state(TASK_RUNNING); lockevent_inc(rwsem_rlock_fail); @@ -1017,9 +1058,7 @@ static struct rw_semaphore * rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) { long count; - enum writer_wait_state wstate; struct rwsem_waiter waiter; - struct rw_semaphore *ret = sem; DEFINE_WAKE_Q(wake_q);
/* do optimistic spinning and steal lock if possible */ @@ -1035,16 +1074,13 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) waiter.task = current; waiter.type = RWSEM_WAITING_FOR_WRITE; waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT; + waiter.handoff_set = false;
raw_spin_lock_irq(&sem->wait_lock); - - /* account for this before adding a new element to the list */ - wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST; - - list_add_tail(&waiter.list, &sem->wait_list); + rwsem_add_waiter(sem, &waiter);
/* we're now waiting on the lock */ - if (wstate == WRITER_NOT_FIRST) { + if (rwsem_first_waiter(sem) != &waiter) { count = atomic_long_read(&sem->count);
/* @@ -1080,13 +1116,16 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) /* wait until we successfully acquire the lock */ set_current_state(state); for (;;) { - if (rwsem_try_write_lock(sem, wstate)) { + if (rwsem_try_write_lock(sem, &waiter)) { /* rwsem_try_write_lock() implies ACQUIRE on success */ break; }
raw_spin_unlock_irq(&sem->wait_lock);
+ if (signal_pending_state(state, current)) + goto out_nolock; + /* * After setting the handoff bit and failing to acquire * the lock, attempt to spin on owner to accelerate lock @@ -1095,7 +1134,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) * In this case, we attempt to acquire the lock again * without sleeping. */ - if (wstate == WRITER_HANDOFF) { + if (waiter.handoff_set) { enum owner_state owner_state;
preempt_disable(); @@ -1106,66 +1145,26 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) goto trylock_again; }
- /* Block until there are no active lockers. */ - for (;;) { - if (signal_pending_state(state, current)) - goto out_nolock; - - schedule(); - lockevent_inc(rwsem_sleep_writer); - set_current_state(state); - /* - * If HANDOFF bit is set, unconditionally do - * a trylock. - */ - if (wstate == WRITER_HANDOFF) - break; - - if ((wstate == WRITER_NOT_FIRST) && - (rwsem_first_waiter(sem) == &waiter)) - wstate = WRITER_FIRST; - - count = atomic_long_read(&sem->count); - if (!(count & RWSEM_LOCK_MASK)) - break; - - /* - * The setting of the handoff bit is deferred - * until rwsem_try_write_lock() is called. - */ - if ((wstate == WRITER_FIRST) && (rt_task(current) || - time_after(jiffies, waiter.timeout))) { - wstate = WRITER_HANDOFF; - lockevent_inc(rwsem_wlock_handoff); - break; - } - } + schedule(); + lockevent_inc(rwsem_sleep_writer); + set_current_state(state); trylock_again: raw_spin_lock_irq(&sem->wait_lock); } __set_current_state(TASK_RUNNING); - list_del(&waiter.list); raw_spin_unlock_irq(&sem->wait_lock); lockevent_inc(rwsem_wlock); - - return ret; + return sem;
out_nolock: __set_current_state(TASK_RUNNING); raw_spin_lock_irq(&sem->wait_lock); - list_del(&waiter.list); - - if (unlikely(wstate == WRITER_HANDOFF)) - atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count); - - if (list_empty(&sem->wait_list)) - atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count); - else + rwsem_del_waiter(sem, &waiter); + if (!list_empty(&sem->wait_list)) rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); raw_spin_unlock_irq(&sem->wait_lock); wake_up_q(&wake_q); lockevent_inc(rwsem_wlock_fail); - return ERR_PTR(-EINTR); }
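/*
 * Shape of the reworked writer slowpath above (sketch, condensed from the
 * hunks in this patch):
 *
 *	rwsem_add_waiter(sem, &waiter);
 *	for (;;) {
 *		if (rwsem_try_write_lock(sem, &waiter))
 *			break;			// implies rwsem_del_waiter()
 *		raw_spin_unlock_irq(&sem->wait_lock);
 *		if (signal_pending_state(state, current))
 *			goto out_nolock;
 *		if (waiter.handoff_set)		// spin on owner, don't sleep
 *			...
 *		schedule();
 *		raw_spin_lock_irq(&sem->wait_lock);
 *	}
 *
 * The WRITER_NOT_FIRST/FIRST/HANDOFF state machine is gone: "am I first?"
 * is recomputed from the wait list inside rwsem_try_write_lock(), and the
 * handoff decision lives there too, keyed off rt_task(waiter->task) and
 * waiter->timeout rather than a cached wstate that could go stale while
 * the waiter slept.
 */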
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 559acef3fddb..b0888e9224da 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -691,7 +691,7 @@ static int load_image_and_restore(void) goto Unlock;
error = swsusp_read(&flags); - swsusp_close(FMODE_READ); + swsusp_close(FMODE_READ | FMODE_EXCL); if (!error) error = hibernation_restore(flags & SF_PLATFORM_MODE);
@@ -981,7 +981,7 @@ static int software_resume(void) /* The snapshot device should not be opened while we're running */ if (!hibernate_acquire()) { error = -EBUSY; - swsusp_close(FMODE_READ); + swsusp_close(FMODE_READ | FMODE_EXCL); goto Unlock; }
@@ -1016,7 +1016,7 @@ static int software_resume(void) pm_pr_dbg("Hibernation image not present or could not be loaded.\n"); return error; Close_Finish: - swsusp_close(FMODE_READ); + swsusp_close(FMODE_READ | FMODE_EXCL); goto Finish; }
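/*
 * The close mode must mirror how the resume device was opened. Sketch of
 * the underlying pairing, assuming swsusp_check() claims the device
 * exclusively (dev and holder are placeholders):
 *
 *	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_EXCL, &holder);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 *
 * blkdev_put() only drops the exclusive claim when FMODE_EXCL is passed
 * back in, so closing with plain FMODE_READ leaks the claim.
 */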
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 779f27a4b46a..6f4625f8276f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8641,9 +8641,6 @@ void __init init_idle(struct task_struct *idle, int cpu) idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY; kthread_set_per_cpu(idle, cpu);
- scs_task_reset(idle); - kasan_unpoison_task_stack(idle); - #ifdef CONFIG_SMP /* * It's possible that init_idle() gets called multiple times on a task, @@ -8799,7 +8796,6 @@ void idle_task_exit(void) finish_arch_post_lock_switch(); }
- scs_task_reset(current); /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 5c71d32b2860..421374c304fc 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1360,14 +1360,26 @@ __event_trigger_test_discard(struct trace_event_file *file, if (eflags & EVENT_FILE_FL_TRIGGER_COND) *tt = event_triggers_call(file, buffer, entry, event);
- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || - (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && - !filter_match_preds(file->filter, entry))) { - __trace_event_discard_commit(buffer, event); - return true; - } + if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED | + EVENT_FILE_FL_FILTERED | + EVENT_FILE_FL_PID_FILTER)))) + return false; + + if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) + goto discard; + + if (file->flags & EVENT_FILE_FL_FILTERED && + !filter_match_preds(file->filter, entry)) + goto discard; + + if ((file->flags & EVENT_FILE_FL_PID_FILTER) && + trace_event_ignore_this_pid(file)) + goto discard;
return false; + discard: + __trace_event_discard_commit(buffer, event); + return true; }
/** diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index bb1123ef2a02..44d031ffe511 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2678,12 +2678,22 @@ static struct trace_event_file * trace_create_new_event(struct trace_event_call *call, struct trace_array *tr) { + struct trace_pid_list *no_pid_list; + struct trace_pid_list *pid_list; struct trace_event_file *file;
file = kmem_cache_alloc(file_cachep, GFP_TRACE); if (!file) return NULL;
+ pid_list = rcu_dereference_protected(tr->filtered_pids, + lockdep_is_held(&event_mutex)); + no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, + lockdep_is_held(&event_mutex)); + + if (pid_list || no_pid_list) + file->flags |= EVENT_FILE_FL_PID_FILTER; + file->event_call = call; file->tr = tr; atomic_set(&file->sm_ref, 0); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 0a5c0db3137e..f5f0039d31e5 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1313,6 +1313,7 @@ static int uprobe_perf_open(struct trace_event_call *call, return 0;
list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + tu = container_of(pos, struct trace_uprobe, tp); err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); if (err) { uprobe_perf_close(call, event); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a3a0a5e994f5..abaa5d96ded2 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -184,9 +184,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack) if (err) goto out_unregister_netdev;
- /* Account for reference in struct vlan_dev_priv */ - dev_hold(real_dev); - vlan_stacked_transfer_operstate(real_dev, dev, vlan); linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index aeeb5f90417b..8602885c8a8e 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -615,6 +615,9 @@ static int vlan_dev_init(struct net_device *dev) if (!vlan->vlan_pcpu_stats) return -ENOMEM;
+ /* Get vlan's reference to real_dev */ + dev_hold(real_dev); + return 0; }
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index f2abc3152888..e4983f473a3c 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1697,7 +1697,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce coalesce; int ret;
- if (!dev->ethtool_ops->set_coalesce && !dev->ethtool_ops->get_coalesce) + if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce) return -EOPNOTSUPP;
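	/*
	 * Why "||" rather than "&&": the SET path below first calls
	 * get_coalesce() to load the current parameters and then writes back
	 * the merged result via set_coalesce(), so both callbacks are
	 * required. The old test only bailed out when *both* were missing
	 * and would dereference a NULL get_coalesce otherwise.
	 */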
ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 9e8100728d46..5dbd4b5505eb 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -1899,15 +1899,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh, /* if any FIB entries reference this nexthop, any dst entries * need to be regenerated */ -static void nh_rt_cache_flush(struct net *net, struct nexthop *nh) +static void nh_rt_cache_flush(struct net *net, struct nexthop *nh, + struct nexthop *replaced_nh) { struct fib6_info *f6i; + struct nh_group *nhg; + int i;
if (!list_empty(&nh->fi_list)) rt_cache_flush(net);
list_for_each_entry(f6i, &nh->f6i_list, nh_list) ipv6_stub->fib6_update_sernum(net, f6i); + + /* if an IPv6 group was replaced, we have to release all old + * dsts to make sure all refcounts are released + */ + if (!replaced_nh->is_group) + return; + + /* new dsts must use only the new nexthop group */ + synchronize_net(); + + nhg = rtnl_dereference(replaced_nh->nh_grp); + for (i = 0; i < nhg->num_nh; i++) { + struct nh_grp_entry *nhge = &nhg->nh_entries[i]; + struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info); + + if (nhi->family == AF_INET6) + ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh); + } }
static int replace_nexthop_grp(struct net *net, struct nexthop *old, @@ -2247,7 +2268,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old, err = replace_nexthop_single(net, old, new, extack);
if (!err) { - nh_rt_cache_flush(net, old); + nh_rt_cache_flush(net, old, new);
__remove_nexthop(net, new, NULL); nexthop_put(new); @@ -2544,11 +2565,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh, /* sets nh_dev if successful */ err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL, extack); - if (err) + if (err) { + /* IPv6 is not enabled, don't call fib6_nh_release */ + if (err == -EAFNOSUPPORT) + goto out; ipv6_stub->fib6_nh_release(fib6_nh); - else + } else { nh->nh_flags = fib6_nh->fib_nh_flags; - + } +out: return err; }
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 4a30deaa9a37..8d2d4d652f6d 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -328,8 +328,6 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) return;
if (tcp_in_slow_start(tp)) { - if (hystart && after(ack, ca->end_seq)) - bictcp_hystart_reset(sk); acked = tcp_slow_start(tp, acked); if (!acked) return; @@ -389,6 +387,9 @@ static void hystart_update(struct sock *sk, u32 delay) struct bictcp *ca = inet_csk_ca(sk); u32 threshold;
+ if (after(tp->snd_una, ca->end_seq)) + bictcp_hystart_reset(sk); + if (hystart_detect & HYSTART_ACK_TRAIN) { u32 now = bictcp_clock_us(sk);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0c4da163535a..dab4a047590b 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -1026,6 +1026,7 @@ static const struct ipv6_stub ipv6_stub_impl = { .ip6_mtu_from_fib6 = ip6_mtu_from_fib6, .fib6_nh_init = fib6_nh_init, .fib6_nh_release = fib6_nh_release, + .fib6_nh_release_dsts = fib6_nh_release_dsts, .fib6_update_sernum = fib6_update_sernum_stub, .fib6_rt_update = fib6_rt_update, .ip6_del_rt = ip6_del_rt, diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2f044a49afa8..ff4e83e2a506 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -174,7 +174,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) /* Policy lookup after SNAT yielded a new policy */ if (skb_dst(skb)->xfrm) { - IPCB(skb)->flags |= IPSKB_REROUTED; + IP6CB(skb)->flags |= IP6SKB_REROUTED; return dst_output(net, sk, skb); } #endif diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9b9ef09382ab..79cb5e5a4948 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3680,6 +3680,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh) fib_nh_common_release(&fib6_nh->nh_common); }
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh) +{ + int cpu; + + if (!fib6_nh->rt6i_pcpu) + return; + + for_each_possible_cpu(cpu) { + struct rt6_info *pcpu_rt, **ppcpu_rt; + + ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu); + pcpu_rt = xchg(ppcpu_rt, NULL); + if (pcpu_rt) { + dst_dev_put(&pcpu_rt->dst); + dst_release(&pcpu_rt->dst); + } + } +} + static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 350348f07070..0966855a7c25 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -422,28 +422,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, return false; }
-/* MP_JOIN client subflow must wait for 4th ack before sending any data: - * TCP can't schedule delack timer before the subflow is fully established. - * MPTCP uses the delack timer to do 3rd ack retransmissions - */ -static void schedule_3rdack_retransmission(struct sock *sk) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); - unsigned long timeout; - - /* reschedule with a timeout above RTT, as we must look only for drop */ - if (tp->srtt_us) - timeout = tp->srtt_us << 1; - else - timeout = TCP_TIMEOUT_INIT; - - WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); - icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; - icsk->icsk_ack.timeout = timeout; - sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); -} - static void clear_3rdack_retransmission(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -526,7 +504,15 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, *size = TCPOLEN_MPTCP_MPJ_ACK; pr_debug("subflow=%p", subflow);
-		schedule_3rdack_retransmission(sk);
+		/* We can use the full delegate action helper only from BH context.
+		 * If we are in process context - sk is flushing the backlog at
+		 * socket lock release time - just set the appropriate flag; it will
+		 * be handled by the release callback.
+		 */
+		if (sock_owned_by_user(sk))
+			set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status);
+		else
+			mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK);
 		return true;
 	}
 	return false;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 4379d69aead7..421fa62ce5cd 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1621,7 +1621,8 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 		if (!xmit_ssk)
 			goto out;
 		if (xmit_ssk != ssk) {
-			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
+					       MPTCP_DELEGATE_SEND);
 			goto out;
 		}
@@ -2959,7 +2960,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk) if (xmit_ssk == ssk) __mptcp_subflow_push_pending(sk, ssk); else if (xmit_ssk) - mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk)); + mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND); } else { set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); } @@ -3013,18 +3014,50 @@ static void mptcp_release_cb(struct sock *sk) __mptcp_update_rmem(sk); }
+/* MP_JOIN client subflow must wait for 4th ack before sending any data: + * TCP can't schedule delack timer before the subflow is fully established. + * MPTCP uses the delack timer to do 3rd ack retransmissions + */ +static void schedule_3rdack_retransmission(struct sock *ssk) +{ + struct inet_connection_sock *icsk = inet_csk(ssk); + struct tcp_sock *tp = tcp_sk(ssk); + unsigned long timeout; + + if (mptcp_subflow_ctx(ssk)->fully_established) + return; + + /* reschedule with a timeout above RTT, as we must look only for drop */ + if (tp->srtt_us) + timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1)); + else + timeout = TCP_TIMEOUT_INIT; + timeout += jiffies; + + WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); + icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; + icsk->icsk_ack.timeout = timeout; + sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); +} + void mptcp_subflow_process_delegated(struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct sock *sk = subflow->conn;
- mptcp_data_lock(sk); - if (!sock_owned_by_user(sk)) - __mptcp_subflow_push_pending(sk, ssk); - else - set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); - mptcp_data_unlock(sk); - mptcp_subflow_delegated_done(subflow); + if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) { + mptcp_data_lock(sk); + if (!sock_owned_by_user(sk)) + __mptcp_subflow_push_pending(sk, ssk); + else + set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); + mptcp_data_unlock(sk); + mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND); + } + if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) { + schedule_3rdack_retransmission(ssk); + mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK); + } }
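/*
 * Worked example for the timeout above (illustrative numbers): tp->srtt_us
 * holds 8x the smoothed RTT in microseconds, so srtt_us >> (3 - 1) equals
 * srtt_us / 4, i.e. 2 * RTT. For a 40ms RTT, srtt_us == 320000 and the
 * delack timer is armed 80000us (two RTTs) into the future, comfortably
 * "above RTT" as the comment requires, so it fires only for a genuinely
 * dropped 3rd ack.
 */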
static int mptcp_hash(struct sock *sk) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index dc984676c5eb..82c5dc4d6b49 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -401,6 +401,7 @@ struct mptcp_delegated_action { DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
#define MPTCP_DELEGATE_SEND 0 +#define MPTCP_DELEGATE_ACK 1
/* MPTCP subflow context */ struct mptcp_subflow_context { @@ -506,23 +507,23 @@ static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
void mptcp_subflow_process_delegated(struct sock *ssk);
-static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow) +static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action) { struct mptcp_delegated_action *delegated; bool schedule;
+ /* the caller held the subflow bh socket lock */ + lockdep_assert_in_softirq(); + /* The implied barrier pairs with mptcp_subflow_delegated_done(), and * ensures the below list check sees list updates done prior to status * bit changes */ - if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) { + if (!test_and_set_bit(action, &subflow->delegated_status)) { /* still on delegated list from previous scheduling */ if (!list_empty(&subflow->delegated_node)) return;
- /* the caller held the subflow bh socket lock */ - lockdep_assert_in_softirq(); - delegated = this_cpu_ptr(&mptcp_delegated_actions); schedule = list_empty(&delegated->head); list_add_tail(&subflow->delegated_node, &delegated->head); @@ -547,16 +548,16 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow) { - return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status); + return !!READ_ONCE(subflow->delegated_status); }
-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow) +static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action) { /* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before * touching the status bit */ smp_wmb(); - clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status); + clear_bit(action, &subflow->delegated_status); }
int mptcp_is_enabled(const struct net *net); diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c index ba9ae482141b..dda8b76b7798 100644 --- a/net/ncsi/ncsi-cmd.c +++ b/net/ncsi/ncsi-cmd.c @@ -18,6 +18,8 @@ #include "internal.h" #include "ncsi-pkt.h"
+static const int padding_bytes = 26;
+
 u32 ncsi_calculate_checksum(unsigned char *data, int len)
 {
 	u32 checksum = 0;
@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
 {
 	struct ncsi_cmd_oem_pkt *cmd;
 	unsigned int len;
+	int payload;
+	/* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
+	 * requires the payload to be padded with zeros
+	 * to a 32-bit boundary before the checksum field.
+	 * Ensure the padding bytes are accounted for in
+	 * the skb allocation.
+	 */
+ payload = ALIGN(nca->payload, 4); len = sizeof(struct ncsi_cmd_pkt_hdr) + 4; - if (nca->payload < 26) - len += 26; - else - len += nca->payload; + len += max(payload, padding_bytes);
cmd = skb_put_zero(skb, len); memcpy(&cmd->mfr_id, nca->data, nca->payload); @@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca) struct net_device *dev = nd->dev; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; + int payload; int len = hlen + tlen; struct sk_buff *skb; struct ncsi_request *nr; @@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca) return NULL;
 	/* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
+	 * The payload needs padding so that the checksum field following the
+	 * payload is aligned to a 32-bit boundary.
 	 * The packet needs padding if its payload is less than 26 bytes to
 	 * meet 64 bytes minimal ethernet frame length.
 	 */
 	len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
-	if (nca->payload < 26)
-		len += 26;
-	else
-		len += nca->payload;
+	payload = ALIGN(nca->payload, 4);
+	len += max(payload, padding_bytes);
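	/*
	 * Worked example (illustrative): nca->payload = 10 gives
	 * ALIGN(10, 4) == 12 and max(12, 26) == 26, so the checksum stays
	 * 32-bit aligned and the frame is still padded up to the 64-byte
	 * ethernet minimum; nca->payload = 30 gives ALIGN(30, 4) == 32 and
	 * max(32, 26) == 32.
	 */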
/* Allocate skb */ skb = alloc_skb(len, GFP_ATOMIC); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 128690c512df..393058a43aa7 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1964,7 +1964,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int struct ip_vs_proto_data *pd; struct ip_vs_conn *cp; int ret, pkts; - int conn_reuse_mode; struct sock *sk;
/* Already marked as IPVS request or reply? */ @@ -2041,15 +2040,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto, ipvs, af, skb, &iph);
- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); - if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) { + if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) { + int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); bool old_ct = false, resched = false;
if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && unlikely(!atomic_read(&cp->dest->weight))) { resched = true; old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); - } else if (is_new_conn_expected(cp, conn_reuse_mode)) { + } else if (conn_reuse_mode && + is_new_conn_expected(cp, conn_reuse_mode)) { old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); if (!atomic_read(&cp->n_control)) { resched = true; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index f1e5443fe7c7..c7708bde057c 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1011,11 +1011,9 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) CTA_TUPLE_REPLY, filter->family, &filter->zone, - filter->orig_flags); - if (err < 0) { - err = -EINVAL; + filter->reply_flags); + if (err < 0) goto err_filter; - } }
return filter; diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index d6bf1b2cd541..b561e0a44a45 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -65,11 +65,11 @@ static void nf_flow_rule_lwt_match(struct nf_flow_match *match, sizeof(struct in6_addr)); if (memcmp(&key->enc_ipv6.src, &in6addr_any, sizeof(struct in6_addr))) - memset(&key->enc_ipv6.src, 0xff, + memset(&mask->enc_ipv6.src, 0xff, sizeof(struct in6_addr)); if (memcmp(&key->enc_ipv6.dst, &in6addr_any, sizeof(struct in6_addr))) - memset(&key->enc_ipv6.dst, 0xff, + memset(&mask->enc_ipv6.dst, 0xff, sizeof(struct in6_addr)); enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS); key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 1f857ffd1ac2..92a686807971 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -667,12 +667,14 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, q->classes[i].deficit = quanta[i]; } } + for (i = q->nbands; i < oldbands; i++) { + qdisc_tree_flush_backlog(q->classes[i].qdisc); + if (i >= q->nstrict) + list_del(&q->classes[i].alist); + } q->nstrict = nstrict; memcpy(q->prio2band, priomap, sizeof(priomap));
- for (i = q->nbands; i < oldbands; i++) - qdisc_tree_flush_backlog(q->classes[i].qdisc); - for (i = 0; i < q->nbands; i++) q->classes[i].quantum = quanta[i];
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 32c1c7ce856d..3d8219e3b026 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1918,8 +1918,10 @@ static int smc_listen(struct socket *sock, int backlog) smc->clcsock->sk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); rc = kernel_listen(smc->clcsock, backlog); - if (rc) + if (rc) { + smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready; goto out; + } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = SMC_LISTEN; @@ -2152,8 +2154,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, static int smc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; + bool do_shutdown = true; struct smc_sock *smc; int rc = -EINVAL; + int old_state; int rc1 = 0;
 	smc = smc_sk(sk);
@@ -2180,7 +2184,11 @@ static int smc_shutdown(struct socket *sock, int how)
 	}
 	switch (how) {
 	case SHUT_RDWR:		/* shutdown in both directions */
+		old_state = sk->sk_state;
 		rc = smc_close_active(smc);
+		if (old_state == SMC_ACTIVE &&
+		    sk->sk_state == SMC_PEERCLOSEWAIT1)
+			do_shutdown = false;
 		break;
 	case SHUT_WR:
 		rc = smc_close_shutdown_write(smc);
@@ -2190,7 +2198,7 @@ static int smc_shutdown(struct socket *sock, int how)
 		/* nothing more to do because peer is not involved */
 		break;
 	}
-	if (smc->clcsock)
+	if (do_shutdown && smc->clcsock)
 		rc1 = kernel_sock_shutdown(smc->clcsock, how);
 	/* map sock_shutdown_cmd constants to sk_shutdown value range */
 	sk->sk_shutdown |= how + 1;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 0f9ffba07d26..04620b53b74a 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -228,6 +228,12 @@ int smc_close_active(struct smc_sock *smc)
 			/* send close request */
 			rc = smc_close_final(conn);
 			sk->sk_state = SMC_PEERCLOSEWAIT1;
+
+			/* Actively shut down the clcsock before the peer closes it,
+			 * to prevent the peer from entering TIME_WAIT state.
+			 */
+			if (smc->clcsock && smc->clcsock->sk)
+				rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
 		} else {
 			/* peer event has changed the state */
 			goto again;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index d672c0f0e247..508a14fc4f58 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1596,14 +1596,26 @@ static void smc_link_down_work(struct work_struct *work)
 	mutex_unlock(&lgr->llc_conf_mutex);
 }
-/* Determine vlan of internal TCP socket. - * @vlan_id: address to store the determined vlan id into - */ +static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev, + struct netdev_nested_priv *priv) +{ + unsigned short *vlan_id = (unsigned short *)priv->data; + + if (is_vlan_dev(lower_dev)) { + *vlan_id = vlan_dev_vlan_id(lower_dev); + return 1; + } + + return 0; +} + +/* Determine vlan of internal TCP socket. */ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(clcsock->sk); + struct netdev_nested_priv priv; struct net_device *ndev; - int i, nest_lvl, rc = 0; + int rc = 0;
ini->vlan_id = 0; if (!dst) { @@ -1621,20 +1633,9 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini) goto out_rel; }
+ priv.data = (void *)&ini->vlan_id; rtnl_lock(); - nest_lvl = ndev->lower_level; - for (i = 0; i < nest_lvl; i++) { - struct list_head *lower = &ndev->adj_list.lower; - - if (list_empty(lower)) - break; - lower = lower->next; - ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower); - if (is_vlan_dev(ndev)) { - ini->vlan_id = vlan_dev_vlan_id(ndev); - break; - } - } + netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv); rtnl_unlock();
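	/*
	 * netdev_walk_all_lower_dev() stops as soon as the callback returns
	 * nonzero, so the walk ends at the first VLAN device found at any
	 * nesting depth; the removed open-coded loop followed only a single
	 * lower link per level and could miss it.
	 */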
out_rel: diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 9ab81db8a654..9aac9c60d786 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex); static const struct proto *saved_tcpv4_prot; static DEFINE_MUTEX(tcpv4_prot_mutex); static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG]; -static struct proto_ops tls_sw_proto_ops; +static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG]; static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], const struct proto *base);
@@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
WRITE_ONCE(sk->sk_prot, &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]); + WRITE_ONCE(sk->sk_socket->ops, + &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]); }
int wait_on_pending_writer(struct sock *sk, long *timeo) @@ -581,8 +583,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, if (tx) { ctx->sk_write_space = sk->sk_write_space; sk->sk_write_space = tls_write_space; - } else { - sk->sk_socket->ops = &tls_sw_proto_ops; } goto out;
@@ -640,6 +640,39 @@ struct tls_context *tls_ctx_create(struct sock *sk) return ctx; }
+static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG], + const struct proto_ops *base) +{ + ops[TLS_BASE][TLS_BASE] = *base; + + ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; + ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked; + + ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE]; + ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read; + + ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE]; + ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read; + +#ifdef CONFIG_TLS_DEVICE + ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; + ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL; + + ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ]; + ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL; + + ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ]; + + ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ]; + + ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ]; + ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL; +#endif +#ifdef CONFIG_TLS_TOE + ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base; +#endif +} + static void tls_build_proto(struct sock *sk) { int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; @@ -651,6 +684,8 @@ static void tls_build_proto(struct sock *sk) mutex_lock(&tcpv6_prot_mutex); if (likely(prot != saved_tcpv6_prot)) { build_protos(tls_prots[TLSV6], prot); + build_proto_ops(tls_proto_ops[TLSV6], + sk->sk_socket->ops); smp_store_release(&saved_tcpv6_prot, prot); } mutex_unlock(&tcpv6_prot_mutex); @@ -661,6 +696,8 @@ static void tls_build_proto(struct sock *sk) mutex_lock(&tcpv4_prot_mutex); if (likely(prot != saved_tcpv4_prot)) { build_protos(tls_prots[TLSV4], prot); + build_proto_ops(tls_proto_ops[TLSV4], + sk->sk_socket->ops); smp_store_release(&saved_tcpv4_prot, prot); } mutex_unlock(&tcpv4_prot_mutex); @@ -871,10 +908,6 @@ static int __init tls_register(void) if (err) return err;
- tls_sw_proto_ops = inet_stream_ops; - tls_sw_proto_ops.splice_read = tls_sw_splice_read; - tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked; - tls_device_init(); tcp_register_ulp(&tcp_tls_ulp_ops);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 1b08b877a890..b0cdcea10180 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1993,6 +1993,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct sock *sk = sock->sk; struct sk_buff *skb; ssize_t copied = 0; + bool from_queue; int err = 0; long timeo; int chunk; @@ -2002,25 +2003,28 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
- skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err); - if (!skb) - goto splice_read_end; - - if (!ctx->decrypted) { - err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); - - /* splice does not support reading control messages */ - if (ctx->control != TLS_RECORD_TYPE_DATA) { - err = -EINVAL; + from_queue = !skb_queue_empty(&ctx->rx_list); + if (from_queue) { + skb = __skb_dequeue(&ctx->rx_list); + } else { + skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, + &err); + if (!skb) goto splice_read_end; - }
+ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); if (err < 0) { tls_err_abort(sk, -EBADMSG); goto splice_read_end; } - ctx->decrypted = 1; } + + /* splice does not support reading control messages */ + if (ctx->control != TLS_RECORD_TYPE_DATA) { + err = -EINVAL; + goto splice_read_end; + } + rxm = strp_msg(skb);
chunk = min_t(unsigned int, rxm->full_len, len); @@ -2028,7 +2032,17 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, if (copied < 0) goto splice_read_end;
- tls_sw_advance_skb(sk, skb, copied); + if (!from_queue) { + ctx->recv_pkt = NULL; + __strp_unpause(&ctx->strp); + } + if (chunk < rxm->full_len) { + __skb_queue_head(&ctx->rx_list, skb); + rxm->offset += len; + rxm->full_len -= len; + } else { + consume_skb(skb); + }
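	/*
	 * Partial-splice bookkeeping (sketch): when the caller spliced fewer
	 * bytes than the record holds, the skb is pushed back onto rx_list
	 * with offset/full_len advanced past the consumed part, so the next
	 * splice_read() or recvmsg() resumes at the unread tail; only a
	 * fully drained record is freed with consume_skb().
	 */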
splice_read_end: release_sock(sk); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 78e08e82c08c..b0bfc78e421c 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2882,9 +2882,6 @@ static int unix_shutdown(struct socket *sock, int mode)
unix_state_lock(sk); sk->sk_shutdown |= mode; - if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && - mode == SHUTDOWN_MASK) - sk->sk_state = TCP_CLOSE; other = unix_peer(sk); if (other) sock_hold(other); diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index b9ac9e9e45a4..10a0bffc3cf6 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -299,6 +299,15 @@ static const struct config_entry config_table[] = { }, #endif
+/* JasperLake */ +#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE) + { + .flags = FLAG_SOF, + .device = 0x4dc8, + .codec_hid = "ESSX8336", + }, +#endif + /* Tigerlake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE) { diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c index da6e6350ceaf..d074727c3e21 100644 --- a/sound/pci/ctxfi/ctamixer.c +++ b/sound/pci/ctxfi/ctamixer.c @@ -23,16 +23,15 @@
#define BLANK_SLOT 4094
-static int amixer_master(struct rsc *rsc) +static void amixer_master(struct rsc *rsc) { rsc->conj = 0; - return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0]; + rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0]; }
-static int amixer_next_conj(struct rsc *rsc) +static void amixer_next_conj(struct rsc *rsc) { rsc->conj++; - return container_of(rsc, struct amixer, rsc)->idx[rsc->conj]; }
static int amixer_index(const struct rsc *rsc) @@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
/* SUM resource management */
-static int sum_master(struct rsc *rsc) +static void sum_master(struct rsc *rsc) { rsc->conj = 0; - return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0]; + rsc->idx = container_of(rsc, struct sum, rsc)->idx[0]; }
-static int sum_next_conj(struct rsc *rsc) +static void sum_next_conj(struct rsc *rsc) { rsc->conj++; - return container_of(rsc, struct sum, rsc)->idx[rsc->conj]; }
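/*
 * Why these callbacks became void: returns such as
 * "return container_of(rsc, struct sum, rsc)->idx[rsc->conj];" read idx[]
 * at the freshly incremented conjugate position, which can sit one past
 * the end of the array on the final step. No caller ever consumed the
 * value (the index is fetched via ops->index() instead), so dropping the
 * return removes the out-of-range read outright.
 */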
static int sum_index(const struct rsc *rsc) diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c index f589da045342..7fc720046ce2 100644 --- a/sound/pci/ctxfi/ctdaio.c +++ b/sound/pci/ctxfi/ctdaio.c @@ -51,12 +51,12 @@ static const struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = { [SPDIFIO] = {.left = 0x05, .right = 0x85}, };
-static int daio_master(struct rsc *rsc) +static void daio_master(struct rsc *rsc) { /* Actually, this is not the resource index of DAIO. * For DAO, it is the input mapper index. And, for DAI, * it is the output time-slot index. */ - return rsc->conj = rsc->idx; + rsc->conj = rsc->idx; }
static int daio_index(const struct rsc *rsc) @@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc) return rsc->conj; }
-static int daio_out_next_conj(struct rsc *rsc) +static void daio_out_next_conj(struct rsc *rsc) { - return rsc->conj += 2; + rsc->conj += 2; }
-static int daio_in_next_conj_20k1(struct rsc *rsc) +static void daio_in_next_conj_20k1(struct rsc *rsc) { - return rsc->conj += 0x200; + rsc->conj += 0x200; }
-static int daio_in_next_conj_20k2(struct rsc *rsc) +static void daio_in_next_conj_20k2(struct rsc *rsc) { - return rsc->conj += 0x100; + rsc->conj += 0x100; }
static const struct rsc_ops daio_out_rsc_ops = { diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c index 81ad26934518..be1d3e61309c 100644 --- a/sound/pci/ctxfi/ctresource.c +++ b/sound/pci/ctxfi/ctresource.c @@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc) return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type]; }
-static int rsc_next_conj(struct rsc *rsc) +static void rsc_next_conj(struct rsc *rsc) { unsigned int i; for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); ) i++; rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i); - return rsc->conj; }
-static int rsc_master(struct rsc *rsc) +static void rsc_master(struct rsc *rsc) { - return rsc->conj = rsc->idx; + rsc->conj = rsc->idx; }
static const struct rsc_ops rsc_generic_ops = { diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h index fdbfd808816d..58553bda44f4 100644 --- a/sound/pci/ctxfi/ctresource.h +++ b/sound/pci/ctxfi/ctresource.h @@ -39,8 +39,8 @@ struct rsc { };
struct rsc_ops { - int (*master)(struct rsc *rsc); /* Move to master resource */ - int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */ + void (*master)(struct rsc *rsc); /* Move to master resource */ + void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */ int (*index)(const struct rsc *rsc); /* Return the index of resource */ /* Return the output slot number */ int (*output_slot)(const struct rsc *rsc); diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c index bd4697b44233..4a94b4708a77 100644 --- a/sound/pci/ctxfi/ctsrc.c +++ b/sound/pci/ctxfi/ctsrc.c @@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
/* SRCIMP resource manager operations */
-static int srcimp_master(struct rsc *rsc) +static void srcimp_master(struct rsc *rsc) { rsc->conj = 0; - return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0]; + rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0]; }
-static int srcimp_next_conj(struct rsc *rsc) +static void srcimp_next_conj(struct rsc *rsc) { rsc->conj++; - return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj]; }
static int srcimp_index(const struct rsc *rsc) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2f1727faec69..9ce7457533c9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6521,6 +6521,27 @@ static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *co alc_write_coef_idx(codec, 0x45, 0x5089); }
+static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
+	WRITE_COEF(0x1a, 0x9003), WRITE_COEF(0x1b, 0x0e2b), WRITE_COEF(0x37, 0xfe06),
+	WRITE_COEF(0x38, 0x4981), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x0074),
+	WRITE_COEF(0x49, 0x0149),
+	{}
+};
+
+static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
+				       const struct hda_fixup *fix,
+				       int action)
+{
+	/*
+	 * The audio jack input and output are not detected on the ASRock NUC Box
+	 * 1100 series when cold booting without this fix. Warm rebooting from a
+	 * certain other OS makes the audio functional, as COEF settings are
+	 * preserved in this case. This fix sets these altered COEF values as
+	 * the default.
+	 */
+	alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
+}
+
 enum {
 	ALC269_FIXUP_GPIO2,
 	ALC269_FIXUP_SONY_VAIO,
@@ -6740,6 +6761,7 @@ enum {
 	ALC287_FIXUP_13S_GEN2_SPEAKERS,
 	ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
 	ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+	ALC233_FIXUP_NO_AUDIO_JACK,
 };
static const struct hda_fixup alc269_fixups[] = { @@ -8460,6 +8482,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, }, + [ALC233_FIXUP_NO_AUDIO_JACK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc233_fixup_no_audio_jack, + }, };
static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8639,6 +8665,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), @@ -8894,6 +8921,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), + SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c index 196b06898eeb..07894ec5e7a6 100644 --- a/sound/soc/codecs/lpass-rx-macro.c +++ b/sound/soc/codecs/lpass-rx-macro.c @@ -2188,7 +2188,7 @@ static int rx_macro_config_classh(struct snd_soc_component *component, snd_soc_component_update_bits(component, CDC_RX_CLSH_DECAY_CTRL, CDC_RX_CLSH_DECAY_RATE_MASK, 0x0); - snd_soc_component_update_bits(component, + snd_soc_component_write_field(component, CDC_RX_RX1_RX_PATH_CFG0, CDC_RX_RXn_CLSH_EN_MASK, 0x1); break; diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index c496b359f2f4..4f568abd59e2 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -1896,9 +1896,8 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream, }
wcd->dai[dai->id].sconfig.rate = params_rate(params); - wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
- return 0; + return wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream); }
static int wcd934x_hw_free(struct snd_pcm_substream *substream, diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c index 52de7d14b139..67151c7770c6 100644 --- a/sound/soc/codecs/wcd938x.c +++ b/sound/soc/codecs/wcd938x.c @@ -1174,6 +1174,9 @@ static bool wcd938x_readonly_register(struct device *dev, unsigned int reg) case WCD938X_DIGITAL_INTR_STATUS_0: case WCD938X_DIGITAL_INTR_STATUS_1: case WCD938X_DIGITAL_INTR_STATUS_2: + case WCD938X_DIGITAL_INTR_CLEAR_0: + case WCD938X_DIGITAL_INTR_CLEAR_1: + case WCD938X_DIGITAL_INTR_CLEAR_2: case WCD938X_DIGITAL_SWR_HM_TEST_0: case WCD938X_DIGITAL_SWR_HM_TEST_1: case WCD938X_DIGITAL_EFUSE_T_DATA_0: diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index 46f365528d50..b74b67720ef4 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -269,9 +269,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
if (ret < 0) { dev_err(dev, "%s: q6asm_open_write failed\n", __func__); - q6asm_audio_client_free(prtd->audio_client); - prtd->audio_client = NULL; - return -ENOMEM; + goto open_err; }
prtd->session_id = q6asm_get_session_id(prtd->audio_client); @@ -279,7 +277,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component, prtd->session_id, substream->stream); if (ret) { dev_err(dev, "%s: stream reg failed ret:%d\n", __func__, ret); - return ret; + goto routing_err; }
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { @@ -301,10 +299,19 @@ static int q6asm_dai_prepare(struct snd_soc_component *component, } if (ret < 0) dev_info(dev, "%s: CMD Format block failed\n", __func__); + else + prtd->state = Q6ASM_STREAM_RUNNING;
- prtd->state = Q6ASM_STREAM_RUNNING; + return ret;
- return 0; +routing_err: + q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE); +open_err: + q6asm_unmap_memory_regions(substream->stream, prtd->audio_client); + q6asm_audio_client_free(prtd->audio_client); + prtd->audio_client = NULL; + + return ret; }
static int q6asm_dai_trigger(struct snd_soc_component *component, diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index 3390ebef9549..243b8179e59d 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -495,7 +495,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol, session->port_id = be_id; snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update); } else { - session->port_id = -1; + if (session->port_id == be_id) { + session->port_id = -1; + return 0; + } + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update); }
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index f6e5ac3e0314..7459956d62b9 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -2674,6 +2674,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); /* remove dynamic controls from the component driver */ int snd_soc_tplg_component_remove(struct snd_soc_component *comp) { + struct snd_card *card = comp->card->snd_card; struct snd_soc_dobj *dobj, *next_dobj; int pass = SOC_TPLG_PASS_END;
@@ -2681,6 +2682,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp) while (pass >= SOC_TPLG_PASS_START) {
/* remove mixer controls */ + down_write(&card->controls_rwsem); list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list, list) {
@@ -2719,6 +2721,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp) break; } } + up_write(&card->controls_rwsem); pass--; }
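/*
 * Locking note (sketch): snd_ctl_remove() expects the caller to hold
 * card->controls_rwsem for writing, unlike snd_ctl_remove_id(), which
 * takes the rwsem internally; hence the down_write()/up_write() bracket
 * around each removal pass over comp->dobj_list.
 */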
diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c index 30025d3c16b6..0862ff8b6627 100644 --- a/sound/soc/sof/intel/hda-bus.c +++ b/sound/soc/sof/intel/hda-bus.c @@ -10,6 +10,8 @@ #include <linux/io.h> #include <sound/hdaudio.h> #include <sound/hda_i915.h> +#include <sound/hda_codec.h> +#include <sound/hda_register.h> #include "../sof-priv.h" #include "hda.h"
@@ -21,6 +23,18 @@ #endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) +static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power) +{ + unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN); + + if (link_power) + mask &= ~BIT(addr); + else + mask |= BIT(addr); + + snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask); +} + static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable) { struct hdac_bus *bus = codec->bus; @@ -41,6 +55,9 @@ static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable) */ if (codec->addr == HDA_IDISP_ADDR && !enable) snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); + + /* WAKEEN needs to be set for disabled links */ + update_codec_wake_enable(bus, codec->addr, enable); }
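/*
 * Worked example: a codec at address 2 has its link powered down
 * (link_power == false), so mask |= BIT(2) and the controller may raise a
 * STATESTS wake interrupt for that codec while its HDA link is off;
 * powering the link back up clears the bit again, handing detection back
 * to the normal unsolicited-event path.
 */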
static const struct hdac_bus_ops bus_core_ops = { diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c index 623cf291e207..262a70791a8f 100644 --- a/sound/soc/sof/intel/hda-dsp.c +++ b/sound/soc/sof/intel/hda-dsp.c @@ -623,8 +623,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend) hda_dsp_ipc_int_disable(sdev);
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) - if (runtime_suspend) - hda_codec_jack_wake_enable(sdev, true); + hda_codec_jack_wake_enable(sdev, runtime_suspend);
/* power down all hda link */ snd_hdac_ext_bus_link_power_down_all(bus); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index f60e2c57d3d0..ef92cca7ae01 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -696,6 +696,20 @@ static int hda_init_caps(struct snd_sof_dev *sdev) return 0; }
+static void hda_check_for_state_change(struct snd_sof_dev *sdev) +{ +#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) + struct hdac_bus *bus = sof_to_bus(sdev); + unsigned int codec_mask; + + codec_mask = snd_hdac_chip_readw(bus, STATESTS); + if (codec_mask) { + hda_codec_jack_check(sdev); + snd_hdac_chip_writew(bus, STATESTS, codec_mask); + } +#endif +} + static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context) { struct snd_sof_dev *sdev = context; @@ -737,6 +751,8 @@ static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context) if (hda_sdw_check_wakeen_irq(sdev)) hda_sdw_process_wakeen(sdev);
+ hda_check_for_state_change(sdev); + /* enable GIE interrupt */ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL, diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c index 6254bacad6eb..717f45a83445 100644 --- a/sound/soc/stm/stm32_i2s.c +++ b/sound/soc/stm/stm32_i2s.c @@ -700,7 +700,7 @@ static int stm32_i2s_configure_clock(struct snd_soc_dai *cpu_dai, if (ret < 0) return ret;
- nb_bits = frame_len * ((cgfr & I2S_CGFR_CHLEN) + 1); + nb_bits = frame_len * (FIELD_GET(I2S_CGFR_CHLEN, cgfr) + 1); ret = stm32_i2s_calc_clk_div(i2s, i2s_clock_rate, (nb_bits * rate)); if (ret)
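/*
 * Why FIELD_GET(): I2S_CGFR_CHLEN is a register bit mask, not a 0/1
 * value. If CHLEN were, say, BIT(10), the old expression with the bit set
 * would compute frame_len * (0x400 + 1) instead of frame_len * 2.
 * FIELD_GET() (from linux/bitfield.h) shifts the masked field down to 0
 * or 1 before the "+ 1".
 */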