From: Hector Martin <marcan@marcan.st>
[ Upstream commit 91918ce88d9fef408bb12c46a27c73d79b604c20 ]
Newer Apple firmwares on chipsets without a hardware RNG require the host to provide a buffer of 256 random bytes to the device on initialization. This buffer is present immediately before NVRAM, suffixed by a footer containing a magic number and the buffer length.
This won't affect chips/firmwares that do not use this feature, so do it unconditionally for all Apple platforms (those with an Apple OTP).
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Hector Martin <marcan@marcan.st>
Reviewed-by: Julian Calaby <julian.calaby@gmail.com>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Link: https://lore.kernel.org/r/20230214080034.3828-3-marcan@marcan.st
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 .../broadcom/brcm80211/brcmfmac/pcie.c        | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 6ee04af85e9d5..967183662d4b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -23,6 +23,7 @@
 #include <linux/bcma/bcma.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/random.h>
 #include <asm/unaligned.h>
 
 #include <soc.h>
@@ -1445,6 +1446,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
         return 0;
 }
 
+struct brcmf_random_seed_footer {
+        __le32 length;
+        __le32 magic;
+};
+
+#define BRCMF_RANDOM_SEED_MAGIC         0xfeedc0de
+#define BRCMF_RANDOM_SEED_LENGTH        0x100
 
 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
                                         const struct firmware *fw, void *nvram,
@@ -1480,6 +1488,30 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
                           nvram_len;
                 memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
                 brcmf_fw_nvram_free(nvram);
+
+                if (devinfo->otp.valid) {
+                        size_t rand_len = BRCMF_RANDOM_SEED_LENGTH;
+                        struct brcmf_random_seed_footer footer = {
+                                .length = cpu_to_le32(rand_len),
+                                .magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
+                        };
+                        void *randbuf;
+
+                        /* Some Apple chips/firmwares expect a buffer of random
+                         * data to be present before NVRAM
+                         */
+                        brcmf_dbg(PCIE, "Download random seed\n");
+
+                        address -= sizeof(footer);
+                        memcpy_toio(devinfo->tcm + address, &footer,
+                                    sizeof(footer));
+
+                        address -= rand_len;
+                        randbuf = kzalloc(rand_len, GFP_KERNEL);
+                        get_random_bytes(randbuf, rand_len);
+                        memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
+                        kfree(randbuf);
+                }
         } else {
                 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
                           devinfo->nvram_name);
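As a side note, the resulting TCM layout is easiest to see with a few lines of plain arithmetic. The sketch below is a standalone userspace model of the address computation, not driver code, and the ram/nvram sizes in it are made up:

  #include <stdio.h>

  /* Illustrative model of the layout the patch builds in TCM:
   * [ ... | 256 random bytes | footer{len, magic} | NVRAM | end of RAM ]
   */
  int main(void)
  {
          unsigned int ram_base = 0x170000, ram_size = 0x90000; /* made up */
          unsigned int nvram_len = 0x2000;                      /* made up */
          unsigned int footer_len = 8;                          /* 2 x __le32 */
          unsigned int seed_len = 0x100;                        /* 256 bytes */

          unsigned int nvram_addr  = ram_base + ram_size - nvram_len;
          unsigned int footer_addr = nvram_addr - footer_len;
          unsigned int seed_addr   = footer_addr - seed_len;

          printf("seed @ 0x%x, footer @ 0x%x, nvram @ 0x%x\n",
                 seed_addr, footer_addr, nvram_addr);
          return 0;
  }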
From: Hector Martin <marcan@marcan.st>
[ Upstream commit 89b89e52153fda2733562776c7c9d9d3ebf8dd6d ]
Apparently the hex passphrase mechanism does not work on newer chips/firmware (e.g. BCM4387). It seems there was a simple way of passing it in binary all along, so use that and avoid the hexification.
OpenBSD has been doing it like this from the beginning, so this should work on all chips.
Also clear the structure before setting the PMK. This was leaking uninitialized stack contents to the device.
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Arend van Spriel <arend.vanspriel@broadcom.com>
Signed-off-by: Hector Martin <marcan@marcan.st>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Link: https://lore.kernel.org/r/20230214092423.15175-6-marcan@marcan.st
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 .../wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 75790b13c9621..22fdc92fd6f88 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1238,13 +1238,14 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
 static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
 {
         struct brcmf_wsec_pmk_le pmk;
-        int i, err;
+        int err;
+
+        memset(&pmk, 0, sizeof(pmk));
 
-        /* convert to firmware key format */
-        pmk.key_len = cpu_to_le16(pmk_len << 1);
-        pmk.flags = cpu_to_le16(BRCMF_WSEC_PASSPHRASE);
-        for (i = 0; i < pmk_len; i++)
-                snprintf(&pmk.key[2 * i], 3, "%02x", pmk_data[i]);
+        /* pass pmk directly */
+        pmk.key_len = cpu_to_le16(pmk_len);
+        pmk.flags = cpu_to_le16(0);
+        memcpy(pmk.key, pmk_data, pmk_len);
 
         /* store psk in firmware */
         err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
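For context on why key_len used to be pmk_len << 1: hexifying a 32-byte PMK doubles it to 64 characters, while the new path hands the raw bytes over. A small standalone sketch of the difference (userspace, illustrative values only):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          unsigned char pmk[32] = { 0x01, 0x02 };   /* rest zero, just an example */
          char hex[2 * sizeof(pmk) + 1];
          unsigned char key[64];

          /* old approach: hexify, doubling the reported key length */
          for (size_t i = 0; i < sizeof(pmk); i++)
                  snprintf(&hex[2 * i], 3, "%02x", pmk[i]);
          printf("hex form:    %zu bytes\n", strlen(hex));   /* 64 */

          /* new approach: pass the PMK bytes as-is */
          memcpy(key, pmk, sizeof(pmk));
          printf("binary form: %zu bytes\n", sizeof(pmk));   /* 32 */
          return 0;
  }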
From: Jan Kara <jack@suse.cz>
[ Upstream commit 62aeb94433fcec80241754b70d0d1836d5926b0a ]
Check that the log of the block size stored in the superblock has a sensible value. Otherwise the shift computing the block size can overflow, leading to undefined behavior.
Reported-by: syzbot+4fec412f59eba8c01b77@syzkaller.appspotmail.com
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/ext2/ext2.h  | 1 +
 fs/ext2/super.c | 7 +++++++
 2 files changed, 8 insertions(+)
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 00e759f051619..a203a5723e2c0 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -177,6 +177,7 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
 #define EXT2_MIN_BLOCK_SIZE             1024
 #define EXT2_MAX_BLOCK_SIZE             4096
 #define EXT2_MIN_BLOCK_LOG_SIZE         10
+#define EXT2_MAX_BLOCK_LOG_SIZE         16
 #define EXT2_BLOCK_SIZE(s)              ((s)->s_blocksize)
 #define EXT2_ADDR_PER_BLOCK(s)          (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
 #define EXT2_BLOCK_SIZE_BITS(s)         ((s)->s_blocksize_bits)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 44a1f356aca29..3349ce85d27cb 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -978,6 +978,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
                 goto failed_mount;
         }
 
+        if (le32_to_cpu(es->s_log_block_size) >
+            (EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS)) {
+                ext2_msg(sb, KERN_ERR,
+                         "Invalid log block size: %u",
+                         le32_to_cpu(es->s_log_block_size));
+                goto failed_mount;
+        }
         blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
         if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
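With BLOCK_SIZE_BITS = 10, the new check accepts s_log_block_size values of at most 6, so the shift can never be fed a huge value from a corrupted superblock. A standalone sketch of the bound (userspace; the helper name try_mount is made up):

  #include <stdio.h>

  #define BLOCK_SIZE_BITS         10                      /* BLOCK_SIZE == 1024 */
  #define BLOCK_SIZE              (1 << BLOCK_SIZE_BITS)
  #define EXT2_MAX_BLOCK_LOG_SIZE 16                      /* value added by the patch */

  static void try_mount(unsigned int log_block_size)
  {
          if (log_block_size > EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS) {
                  printf("s_log_block_size %u: rejected\n", log_block_size);
                  return;
          }
          printf("s_log_block_size %u: blocksize %u\n",
                 log_block_size, BLOCK_SIZE << log_block_size);
  }

  int main(void)
  {
          try_mount(2);           /* 4096-byte blocks, accepted             */
          try_mount(6);           /* largest value the check lets through   */
          try_mount(4000000000u); /* fuzzed value: shift would be undefined */
          return 0;
  }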
From: Nathan Chancellor <nathan@kernel.org>
[ Upstream commit c8384d4a51e7cb0e6587f3143f29099f202c5de1 ]
With clang's kernel control flow integrity (kCFI, CONFIG_CFI_CLANG), indirect call targets are validated against the expected function pointer prototype to make sure the call target is valid, which helps mitigate ROP attacks. If the types are not identical, there is a failure at run time, which manifests as either a kernel panic or a thread getting killed. A warning in clang aims to catch these at compile time, which reveals:
  drivers/net/ethernet/pasemi/pasemi_mac.c:1665:21: error: incompatible function pointer types initializing 'netdev_tx_t (*)(struct sk_buff *, struct net_device *)' (aka 'enum netdev_tx (*)(struct sk_buff *, struct net_device *)') with an expression of type 'int (struct sk_buff *, struct net_device *)' [-Werror,-Wincompatible-function-pointer-types-strict]
          .ndo_start_xmit = pasemi_mac_start_tx,
                            ^~~~~~~~~~~~~~~~~~~
  1 error generated.
->ndo_start_xmit() in 'struct net_device_ops' expects a return type of 'netdev_tx_t', not 'int'. Adjust the return type of pasemi_mac_start_tx() to match the prototype's to resolve the warning. While PowerPC does not currently implement support for kCFI, it could in the future, which means this warning becomes a fatal CFI failure at run time.
Link: https://github.com/ClangBuiltLinux/linux/issues/1750
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Horatiu Vultur <horatiu.vultur@microchip.com>
Link: https://lore.kernel.org/r/20230319-pasemi-incompatible-pointer-types-strict-...
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/ethernet/pasemi/pasemi_mac.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index e2c280913fbbb..8238a70161599 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1435,7 +1435,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
         write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
 }
 
-static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
         struct pasemi_mac * const mac = netdev_priv(dev);
         struct pasemi_mac_txring * const txring = tx_ring(mac);
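The mismatch is purely about the declared return type of the function assigned to the ops pointer. A toy userspace model of the same class of mismatch (made-up types, not kernel API) that compiles and runs:

  #include <stdio.h>

  typedef enum { TX_OK, TX_BUSY } tx_status_t;

  struct ops {
          tx_status_t (*start_xmit)(void *skb, void *dev); /* expected prototype */
  };

  /* Return type matches the function pointer, so the indirect call target is
   * valid. Declaring this as "static int my_start_xmit(...)" instead would
   * trigger -Wincompatible-function-pointer-types at the initializer below,
   * which is the same class of problem the patch fixes for
   * pasemi_mac_start_tx(). */
  static tx_status_t my_start_xmit(void *skb, void *dev)
  {
          (void)skb; (void)dev;
          return TX_OK;
  }

  static const struct ops ops = { .start_xmit = my_start_xmit };

  int main(void)
  {
          printf("xmit -> %d\n", ops.start_xmit(NULL, NULL));
          return 0;
  }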
From: Nick Child <nnac123@linux.ibm.com>
[ Upstream commit 5dd0dfd55baec0742ba8f5625a0dd064aca7db16 ]
When setting the XPS value of a TX queue, warn the user once if the index of the queue is greater than the number of allocated TX queues.
Previously, this scenario went uncaught. In the best case, it resulted in unnecessary allocations. In the worst case, it resulted in out-of-bounds memory references through calls to `netdev_get_tx_queue(dev, index)`. Therefore, it is important to inform the user, but it is not worth returning an error and risking taking down the netdevice.
Signed-off-by: Nick Child <nnac123@linux.ibm.com>
Reviewed-by: Piotr Raczynski <piotr.raczynski@intel.com>
Link: https://lore.kernel.org/r/20230321150725.127229-1-nnac123@linux.ibm.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/core/dev.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/net/core/dev.c b/net/core/dev.c
index b778f35965433..03903d3f1d695 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2303,6 +2303,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
         bool active = false;
         unsigned int nr_ids;
 
+        WARN_ON_ONCE(index >= dev->num_tx_queues);
+
         if (dev->num_tc) {
                 /* Do not allow XPS on subordinate device directly */
                 num_tc = dev->num_tc;
From: Eli Cohen <elic@nvidia.com>
[ Upstream commit 4e0473f1060aa49621d40a113afde24818101d37 ]
When calling irq_set_affinity_notifier() with NULL as the notify argument, the glue pointer in the corresponding array entry is freed, but the pointer is left in the array. A subsequent call to free_irq_cpu_rmap() will try to free this entry again, leading to a possible use-after-free.
Fix that by setting the array entry to NULL, and by checking for a non-NULL entry when iterating over the array in free_irq_cpu_rmap().
The current code does not suffer from this since there are no cases where irq_set_affinity_notifier(irq, NULL) (note the NULL passed for the notify arg) is called, followed by a call to free_irq_cpu_rmap(), so we don't hit an issue. Subsequent patches in this series exercise this flow, hence the required fix.
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 lib/cpu_rmap.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index f610b2a10b3ed..f52389054a24f 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -235,7 +235,8 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 
         for (index = 0; index < rmap->used; index++) {
                 glue = rmap->obj[index];
-                irq_set_affinity_notifier(glue->notify.irq, NULL);
+                if (glue)
+                        irq_set_affinity_notifier(glue->notify.irq, NULL);
         }
 
         cpu_rmap_put(rmap);
@@ -271,6 +272,7 @@ static void irq_cpu_rmap_release(struct kref *ref)
                 container_of(ref, struct irq_glue, notify.kref);
 
         cpu_rmap_put(glue->rmap);
+        glue->rmap->obj[glue->index] = NULL;
         kfree(glue);
 }
 
@@ -300,6 +302,7 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
         rc = irq_set_affinity_notifier(irq, &glue->notify);
         if (rc) {
                 cpu_rmap_put(glue->rmap);
+                rmap->obj[glue->index] = NULL;
                 kfree(glue);
         }
         return rc;
From: Zheng Wang <zyytlz.wz@163.com>
[ Upstream commit f486893288f3e9b171b836f43853a6426515d800 ]
mptlan_probe() calls mpt_register_lan_device() which initializes the &priv->post_buckets_task workqueue. A call to mpt_lan_wake_post_buckets_task() will subsequently start the work.
During driver unload in mptlan_remove() the following race may occur:
CPU0                         CPU1

                             |mpt_lan_post_receive_buckets_work()
mptlan_remove()              |
  free_netdev()              |
    kfree(dev);              |
                             |
                             | dev->mtu
                             | //use
Fix this by finishing the work prior to cleaning up in mptlan_remove().
[mkp: we really should remove mptlan instead of attempting to fix it]
Signed-off-by: Zheng Wang <zyytlz.wz@163.com>
Link: https://lore.kernel.org/r/20230318081635.796479-1-zyytlz.wz@163.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/message/fusion/mptlan.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index ebc00d47abf52..624803a887d8f 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -1430,7 +1430,9 @@ mptlan_remove(struct pci_dev *pdev)
 {
         MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
         struct net_device       *dev = ioc->netdev;
+        struct mpt_lan_priv     *priv = netdev_priv(dev);
 
+        cancel_delayed_work_sync(&priv->post_buckets_task);
         if(dev != NULL) {
                 unregister_netdev(dev);
                 free_netdev(dev);
From: Andreas Gruenbacher <agruenba@redhat.com>
[ Upstream commit cfcdb5bad34f600aed7613c3c1a5e618111f77b7 ]
The maximum allowed height of an inode's metadata tree depends on the filesystem block size; it is lower for bigger-block filesystems. When reading in an inode, make sure that the height doesn't exceed the maximum allowed height.
Arrays like sd_heightsize are sized to be big enough for any filesystem block size; they will often be slightly bigger than what's needed for a specific filesystem.
Reported-by: syzbot+45d4691b1ed3c48eba05@syzkaller.appspotmail.com
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/gfs2/glops.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index a7a423adf7c8b..ff35cc365930c 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -339,6 +339,7 @@ static int inode_go_demote_ok(const struct gfs2_glock *gl)
 
 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 {
+        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
         const struct gfs2_dinode *str = buf;
         struct timespec64 atime;
         u16 height, depth;
@@ -378,7 +379,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
         /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
         gfs2_set_inode_flags(&ip->i_inode);
         height = be16_to_cpu(str->di_height);
-        if (unlikely(height > GFS2_MAX_META_HEIGHT))
+        if (unlikely(height > sdp->sd_max_height))
                 goto corrupt;
         ip->i_height = (u8)height;
From: Kemeng Shi <shikemeng@huaweicloud.com>
[ Upstream commit b07ffe6927c75d99af534d685282ea188d9f71a6 ]
We need to set ac_g_ex so that the goal start is passed on to ext4_mb_find_by_goal(). Set ac_g_ex instead of ac_f_ex in ext4_mb_normalize_request(). Besides, we should ensure the goal start is in the range [first_data_block, blocks_count), as ext4_mb_initialize_context() does.
[ Added a check to make sure size is less than ar->pright; otherwise we could end up passing an underflowed value of ar->pright - size to ext4_get_group_no_and_offset(), which will trigger a BUG_ON later on. - TYT ]
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Link: https://lore.kernel.org/r/20230303172120.3800725-2-shikemeng@huaweicloud.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/ext4/mballoc.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4ea4fe92eb8c9..13032903d7423 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3089,6 +3089,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
                                 struct ext4_allocation_request *ar)
 {
         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+        struct ext4_super_block *es = sbi->s_es;
         int bsbits, max;
         ext4_lblk_t end;
         loff_t size, start_off;
@@ -3269,18 +3270,21 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
         ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
 
         /* define goal start in order to merge */
-        if (ar->pright && (ar->lright == (start + size))) {
+        if (ar->pright && (ar->lright == (start + size)) &&
+            ar->pright >= size &&
+            ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
                 /* merge to the right */
                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
-                                                &ac->ac_f_ex.fe_group,
-                                                &ac->ac_f_ex.fe_start);
+                                                &ac->ac_g_ex.fe_group,
+                                                &ac->ac_g_ex.fe_start);
                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
         }
-        if (ar->pleft && (ar->lleft + 1 == start)) {
+        if (ar->pleft && (ar->lleft + 1 == start) &&
+            ar->pleft + 1 < ext4_blocks_count(es)) {
                 /* merge to the left */
                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
-                                                &ac->ac_f_ex.fe_group,
-                                                &ac->ac_f_ex.fe_start);
+                                                &ac->ac_g_ex.fe_group,
+                                                &ac->ac_g_ex.fe_start);
                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
         }
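The extra conditions guard the ar->pright - size subtraction. A standalone worked example of how the unguarded form wraps around to a huge block number, which is what the added bounds checks prevent (userspace, made-up block numbers):

  #include <stdio.h>

  int main(void)
  {
          unsigned long long first_data_block = 1;  /* typical for 1K blocks          */
          unsigned long long pright = 8;            /* block to the right of the goal */
          unsigned long long size = 32;             /* normalized request length      */

          /* unguarded: pright - size wraps to a huge block number */
          printf("unguarded goal: %llu\n", pright - size);

          /* guarded as in the patch */
          if (pright >= size && pright - size >= first_data_block)
                  printf("guarded goal: %llu\n", pright - size);
          else
                  printf("guarded: merge-to-the-right hint skipped\n");
          return 0;
  }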
From: Ojaswin Mujoo <ojaswin@linux.ibm.com>
[ Upstream commit 93cdf49f6eca5e23f6546b8f28457b2e6a6961d9 ]
When the length of the best extent found is less than the length of the goal extent, we need to make sure that the best extent at least covers the start of the original request. This is done by adjusting ac_b_ex.fe_logical (the logical start) of the extent.
While doing so, the current logic sometimes results in the best extent's logical range overflowing the goal extent. Since this best extent is later added to the inode preallocation list, we have a possibility of introducing overlapping preallocations. This is discussed in detail here [1].
As per Jan's suggestion, to fix this, replace the existing logic with the below logic for adjusting best extent as it keeps fragmentation in check while ensuring logical range of best extent doesn't overflow out of goal extent:
1. Check if best extent can be kept at end of goal range and still cover original start.
2. Else, check if best extent can be kept at start of goal range and still cover original start.
3. Else, keep the best extent at start of original request.
Also, add a few extra BUG_ONs that might help catch errors faster.
[1] https://lore.kernel.org/r/Y+OGkVvzPN0RMv0O@li-bb2b2a4c-3307-11b2-a85c-8fa5c3...
Suggested-by: Jan Kara <jack@suse.cz>
Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/f96aca6d415b36d1f90db86c1a8cd7e2e9d7ab0e.167973181...
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/ext4/mballoc.c | 49 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 31 insertions(+), 18 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 13032903d7423..e1391b64b43b5 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3376,6 +3376,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
         BUG_ON(start < pa->pa_pstart);
         BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
         BUG_ON(pa->pa_free < len);
+        BUG_ON(ac->ac_b_ex.fe_len <= 0);
         pa->pa_free -= len;
 
         mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
@@ -3680,10 +3681,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
                 return -ENOMEM;
 
         if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
-                int winl;
-                int wins;
-                int win;
-                int offs;
+                int new_bex_start;
+                int new_bex_end;
 
                 /* we can't allocate as much as normalizer wants.
                  * so, found space must get proper lstart
@@ -3691,26 +3690,40 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
                 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
                 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
 
-                /* we're limited by original request in that
-                 * logical block must be covered any way
-                 * winl is window we can move our chunk within */
-                winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
+                /*
+                 * Use the below logic for adjusting best extent as it keeps
+                 * fragmentation in check while ensuring logical range of best
+                 * extent doesn't overflow out of goal extent:
+                 *
+                 * 1. Check if best ex can be kept at end of goal and still
+                 *    cover original start
+                 * 2. Else, check if best ex can be kept at start of goal and
+                 *    still cover original start
+                 * 3. Else, keep the best ex at start of original request.
+                 */
+                new_bex_end = ac->ac_g_ex.fe_logical +
+                        EXT4_C2B(sbi, ac->ac_g_ex.fe_len);
+                new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+                if (ac->ac_o_ex.fe_logical >= new_bex_start)
+                        goto adjust_bex;
 
-                /* also, we should cover whole original request */
-                wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
+                new_bex_start = ac->ac_g_ex.fe_logical;
+                new_bex_end =
+                        new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+                if (ac->ac_o_ex.fe_logical < new_bex_end)
+                        goto adjust_bex;
 
-                /* the smallest one defines real window */
-                win = min(winl, wins);
+                new_bex_start = ac->ac_o_ex.fe_logical;
+                new_bex_end =
+                        new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
 
-                offs = ac->ac_o_ex.fe_logical %
-                        EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
-                if (offs && offs < win)
-                        win = offs;
+adjust_bex:
+                ac->ac_b_ex.fe_logical = new_bex_start;
 
-                ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
-                        EXT4_NUM_B2C(sbi, win);
                 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
                 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+                BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
+                                      EXT4_C2B(sbi, ac->ac_g_ex.fe_len)));
         }
 
         /* preallocation can change ac_b_ex, thus we store actually
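To see the three placement cases with concrete numbers, here is a small standalone model (userspace; the cluster-to-block conversion is ignored and the helper name place_best is made up):

  #include <stdio.h>

  /* Model of the best-extent placement: the goal covers
   * [g_start, g_start + g_len), the original request starts at o_start, and
   * the allocator found best_len blocks. Returns the chosen logical start. */
  static long place_best(long g_start, long g_len, long o_start, long best_len)
  {
          long start, end;

          /* 1. try keeping the best extent at the end of the goal range */
          end = g_start + g_len;
          start = end - best_len;
          if (o_start >= start)
                  return start;

          /* 2. try keeping it at the start of the goal range */
          start = g_start;
          end = start + best_len;
          if (o_start < end)
                  return start;

          /* 3. fall back to the start of the original request */
          return o_start;
  }

  int main(void)
  {
          /* goal [0, 64), original start 50, best 16 -> case 1: [48, 64) */
          printf("case 1: best starts at %ld\n", place_best(0, 64, 50, 16));
          /* goal [0, 64), original start 10, best 16 -> case 2: [0, 16)  */
          printf("case 2: best starts at %ld\n", place_best(0, 64, 10, 16));
          /* goal [0, 64), original start 20, best 16 -> case 3: [20, 36) */
          printf("case 3: best starts at %ld\n", place_best(0, 64, 20, 16));
          return 0;
  }

In every case the chosen range stays inside the goal extent (or starts at the original request), which is exactly what the new BUG_ON at the end of the hunk asserts.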
From: Chao Yu <chao@kernel.org>
[ Upstream commit c9b3649a934d131151111354bcbb638076f03a30 ]
xfstest generic/361 reports a bug as below:
f2fs_bug_on(sbi, sbi->fsync_node_num);
kernel BUG at fs/f2fs/super.c:1627!
RIP: 0010:f2fs_put_super+0x3a8/0x3b0
Call Trace:
 generic_shutdown_super+0x8c/0x1b0
 kill_block_super+0x2b/0x60
 kill_f2fs_super+0x87/0x110
 deactivate_locked_super+0x39/0x80
 deactivate_super+0x46/0x50
 cleanup_mnt+0x109/0x170
 __cleanup_mnt+0x16/0x20
 task_work_run+0x65/0xa0
 exit_to_user_mode_prepare+0x175/0x190
 syscall_exit_to_user_mode+0x25/0x50
 do_syscall_64+0x4c/0x90
 entry_SYSCALL_64_after_hwframe+0x72/0xdc
During umount(), if cp_error is set, f2fs_wait_on_all_pages() should not stop waiting for all F2FS_WB_CP_DATA pages to be written back; otherwise fsync_node_num can be non-zero after f2fs_wait_on_all_pages() returns, causing this bug.
In this case, to avoid an endless loop in f2fs_wait_on_all_pages(), drop all dirty pages rather than redirtying them.
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/f2fs/checkpoint.c | 12 ++++++++++--
 fs/f2fs/data.c       |  3 ++-
 2 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index a563de5ccd217..621e0d4f1fbf5 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -273,8 +273,15 @@ static int __f2fs_write_meta_page(struct page *page,
 
         trace_f2fs_writepage(page, META);
 
-        if (unlikely(f2fs_cp_error(sbi)))
+        if (unlikely(f2fs_cp_error(sbi))) {
+                if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
+                        ClearPageUptodate(page);
+                        dec_page_count(sbi, F2FS_DIRTY_META);
+                        unlock_page(page);
+                        return 0;
+                }
                 goto redirty_out;
+        }
         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                 goto redirty_out;
         if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
@@ -1185,7 +1192,8 @@ void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
                 if (!get_pages(sbi, F2FS_WB_CP_DATA))
                         break;
 
-                if (unlikely(f2fs_cp_error(sbi)))
+                if (unlikely(f2fs_cp_error(sbi) &&
+                        !is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
                         break;
 
                 io_schedule_timeout(5*HZ);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 56b2dadd623b2..419586809cef6 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1885,7 +1885,8 @@ static int __write_data_page(struct page *page, bool *submitted,
                  * don't drop any dirty dentry pages for keeping lastest
                  * directory structure.
                  */
-                if (S_ISDIR(inode->i_mode))
+                if (S_ISDIR(inode->i_mode) &&
+                                !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
                         goto redirty_out;
                 goto out;
         }
From: Hans de Goede <hdegoede@redhat.com>
[ Upstream commit ef16799640865f937719f0771c93be5dca18adc6 ]
A received TKIP key may be up to 32 bytes because it may contain MIC rx/tx keys too. These are not used by iwl, and copying them over overflows the iwl_keyinfo.key field.
Add a check to not copy more data to iwl_keyinfo.key than will fit.
This fixes backtraces like this one:
memcpy: detected field-spanning write (size 32) of single field "sta_cmd.key.key" at drivers/net/wireless/intel/iwlwifi/dvm/sta.c:1103 (size 16)
WARNING: CPU: 1 PID: 946 at drivers/net/wireless/intel/iwlwifi/dvm/sta.c:1103 iwlagn_send_sta_key+0x375/0x390 [iwldvm]
<snip>
Hardware name: Dell Inc. Latitude E6430/0H3MT5, BIOS A21 05/08/2017
RIP: 0010:iwlagn_send_sta_key+0x375/0x390 [iwldvm]
<snip>
Call Trace:
 <TASK>
 iwl_set_dynamic_key+0x1f0/0x220 [iwldvm]
 iwlagn_mac_set_key+0x1e4/0x280 [iwldvm]
 drv_set_key+0xa4/0x1b0 [mac80211]
 ieee80211_key_enable_hw_accel+0xa8/0x2d0 [mac80211]
 ieee80211_key_replace+0x22d/0x8e0 [mac80211]
 <snip>
Link: https://www.alionet.org/index.php?topic=1469.0
Link: https://lore.kernel.org/linux-wireless/20230218191056.never.374-kees@kernel....
Link: https://lore.kernel.org/linux-wireless/68760035-7f75-1b23-e355-bfb758a87d83@...
Cc: Kees Cook <keescook@chromium.org>
Suggested-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/wireless/intel/iwlwifi/dvm/sta.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index de6ec9b7ace45..f30bac02d32ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -1101,6 +1101,7 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
 {
         __le16 key_flags;
         struct iwl_addsta_cmd sta_cmd;
+        size_t to_copy;
         int i;
 
         spin_lock_bh(&priv->sta_lock);
@@ -1120,7 +1121,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
                 sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
                 for (i = 0; i < 5; i++)
                         sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
-                memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+                /* keyconf may contain MIC rx/tx keys which iwl does not use */
+                to_copy = min_t(size_t, sizeof(sta_cmd.key.key), keyconf->keylen);
+                memcpy(sta_cmd.key.key, keyconf->key, to_copy);
                 break;
         case WLAN_CIPHER_SUITE_WEP104:
                 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
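The fix is the common clamp-before-memcpy pattern. A minimal standalone sketch with the same sizes (16-byte destination field, 32-byte TKIP key material; userspace, illustrative only):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          unsigned char dest[16];            /* hardware key field, 16 bytes */
          unsigned char src[32] = { 0 };     /* TKIP key + MIC rx/tx keys    */
          size_t keylen = sizeof(src);

          /* clamp the copy so the extra MIC key material is simply ignored */
          size_t to_copy = keylen < sizeof(dest) ? keylen : sizeof(dest);
          memcpy(dest, src, to_copy);

          printf("copied %zu of %zu bytes\n", to_copy, keylen);
          return 0;
  }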
From: Min Li <lm0963hack@gmail.com>
[ Upstream commit 25e97f7b1866e6b8503be349eeea44bb52d661ce ]
conn->chan_lock isn't acquired before l2cap_get_chan_by_scid(), so if l2cap_get_chan_by_scid() returns NULL, the mutex_unlock(&conn->chan_lock) in that error path triggers a 'bad unlock balance' warning.
Reported-by: syzbot+9519d6b5b79cf7787cf3@syzkaller.appspotmail.com
Link: https://lore.kernel.org/all/000000000000894f5f05f95e9f4d@google.com/
Signed-off-by: Min Li <lm0963hack@gmail.com>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/bluetooth/l2cap_core.c | 1 -
 1 file changed, 1 deletion(-)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 1a68aad5737e1..94d40a20ab958 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4392,7 +4392,6 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
 
         chan = l2cap_get_chan_by_scid(conn, scid);
         if (!chan) {
-                mutex_unlock(&conn->chan_lock);
                 return 0;
         }