Use "#ifdef" instead of "#if", as it is possible to select KVM
without enabling RETPOLINE.
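A minimal preprocessor sketch (illustrative only, not part of the patch),
assuming Kconfig either defines CONFIG_RETPOLINE to 1 or leaves it
undefined:

  /* "#ifdef" only asks whether the macro is defined at all: */
  #ifdef CONFIG_RETPOLINE
          /* retpoline-enabled path */
  #else
          /* fallback path */
  #endif

  /*
   * "#if" additionally evaluates the macro's value; with the option unset
   * the macro does not exist, and the kernel's -Wundef warning fires:
   */
  #if CONFIG_RETPOLINE
          /* retpoline-enabled path */
  #endif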
Adding the following flags on top of tinyconfig produces an example of a
config that fails to build:
CONFIG_64BIT=y
CONFIG_PCI=y
CONFIG_ACPI=y
CONFIG_VIRTUALIZATION=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_CRYPTO=y
CONFIG_DMADEVICES=y
CONFIG_X86_MCE=y
CONFIG_RETPOLINE=y
CONFIG_MEMORY_FAILURE=y
CONFIG_KVM=y
CONFIG_KVM_AMD=y
CONFIG_CRYPTO_DEV_CCP=y
CONFIG_CRYPTO_DEV_CCP_DD=y
CONFIG_CRYPTO_DEV_SP_CCP=y
CONFIG_CRYPTO_DEV_SP_PSP=y
CONFIG_KVM_AMD_SEV=y
CONFIG_AMD_MEM_ENCRYPT=y
CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n
Cc: stable@vger.kernel.org # 5.19
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Fixes: d1f5c8366288 ("KVM: x86/mmu: Introduce kvm_mmu_map_tdp_page() for use by TDX and SNP")
Signed-off-by: Jarkko Sakkinen <jarkko@profian.com>
---
arch/x86/kvm/mmu/mmu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0b99ee4ea184..e08c7e85bbb9 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4213,7 +4213,7 @@ kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
* direct_page_fault() when appropriate.
*/
//r = direct_page_fault(vcpu, &fault);
-#if CONFIG_RETPOLINE
+#ifdef CONFIG_RETPOLINE
if (fault.is_tdp)
r = kvm_tdp_page_fault(vcpu, &fault);
#else
--
2.36.1
From: Christoph Hellwig <hch@lst.de>
Upstream commit: 3175199ab0ac ("block: split bio_kmalloc from bio_alloc_bioset")
This is a backport to stable 5.10. It fixes an issue reported by syzbot.
Link: https://syzkaller.appspot.com/bug?id=a3416231e37024a75f2b95bd95db0d8ce8132a…
bio_kmalloc shares almost no logic with the bio_set based fast path
in bio_alloc_bioset. Split it into an entirely separate implementation.
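As a rough caller sketch (hypothetical helper, not taken from this patch),
bio_kmalloc() keeps its existing calling convention; it simply no longer
routes through bio_alloc_bioset() with a NULL bio_set:

  #include <linux/bio.h>

  /* Hypothetical example: allocate a kmalloc-backed bio with inline vecs. */
  static struct bio *example_alloc_read_bio(struct block_device *bdev,
                                             unsigned int nr_vecs)
  {
          struct bio *bio;

          /* Returns NULL if nr_vecs > UIO_MAXIOV or the allocation fails. */
          bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
          if (!bio)
                  return NULL;
          bio_set_dev(bio, bdev);
          bio->bi_opf = REQ_OP_READ;
          return bio;     /* later released with bio_put(), which kfree()s it */
  }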
Reported-by: syzbot+4f441e6ca0fcad141421@syzkaller.appspotmail.com
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Tadeusz Struk <tadeusz.struk@linaro.org>
---
block/bio.c | 166 +++++++++++++++++++++++---------------------
include/linux/bio.h | 6 +-
2 files changed, 86 insertions(+), 86 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index f8d26ce7b61b..be59276e462e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -405,122 +405,101 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
* @nr_iovecs: number of iovecs to pre-allocate
* @bs: the bio_set to allocate from.
*
- * Description:
- * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
- * backed by the @bs's mempool.
+ * Allocate a bio from the mempools in @bs.
*
- * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
- * always be able to allocate a bio. This is due to the mempool guarantees.
- * To make this work, callers must never allocate more than 1 bio at a time
- * from this pool. Callers that need to allocate more than 1 bio must always
- * submit the previously allocated bio for IO before attempting to allocate
- * a new one. Failure to do so can cause deadlocks under memory pressure.
+ * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
+ * allocate a bio. This is due to the mempool guarantees. To make this work,
+ * callers must never allocate more than 1 bio at a time from the general pool.
+ * Callers that need to allocate more than 1 bio must always submit the
+ * previously allocated bio for IO before attempting to allocate a new one.
+ * Failure to do so can cause deadlocks under memory pressure.
*
- * Note that when running under submit_bio_noacct() (i.e. any block
- * driver), bios are not submitted until after you return - see the code in
- * submit_bio_noacct() that converts recursion into iteration, to prevent
- * stack overflows.
+ * Note that when running under submit_bio_noacct() (i.e. any block driver),
+ * bios are not submitted until after you return - see the code in
+ * submit_bio_noacct() that converts recursion into iteration, to prevent
+ * stack overflows.
*
- * This would normally mean allocating multiple bios under
- * submit_bio_noacct() would be susceptible to deadlocks, but we have
- * deadlock avoidance code that resubmits any blocked bios from a rescuer
- * thread.
+ * This would normally mean allocating multiple bios under submit_bio_noacct()
+ * would be susceptible to deadlocks, but we have
+ * deadlock avoidance code that resubmits any blocked bios from a rescuer
+ * thread.
*
- * However, we do not guarantee forward progress for allocations from other
- * mempools. Doing multiple allocations from the same mempool under
- * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
- * for per bio allocations.
+ * However, we do not guarantee forward progress for allocations from other
+ * mempools. Doing multiple allocations from the same mempool under
+ * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
+ * for per bio allocations.
*
- * RETURNS:
- * Pointer to new bio on success, NULL on failure.
+ * Returns: Pointer to new bio on success, NULL on failure.
*/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
struct bio_set *bs)
{
gfp_t saved_gfp = gfp_mask;
- unsigned front_pad;
- unsigned inline_vecs;
- struct bio_vec *bvl = NULL;
struct bio *bio;
void *p;
- if (!bs) {
- if (nr_iovecs > UIO_MAXIOV)
- return NULL;
-
- p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
- front_pad = 0;
- inline_vecs = nr_iovecs;
- } else {
- /* should not use nobvec bioset for nr_iovecs > 0 */
- if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
- nr_iovecs > 0))
- return NULL;
- /*
- * submit_bio_noacct() converts recursion to iteration; this
- * means if we're running beneath it, any bios we allocate and
- * submit will not be submitted (and thus freed) until after we
- * return.
- *
- * This exposes us to a potential deadlock if we allocate
- * multiple bios from the same bio_set() while running
- * underneath submit_bio_noacct(). If we were to allocate
- * multiple bios (say a stacking block driver that was splitting
- * bios), we would deadlock if we exhausted the mempool's
- * reserve.
- *
- * We solve this, and guarantee forward progress, with a rescuer
- * workqueue per bio_set. If we go to allocate and there are
- * bios on current->bio_list, we first try the allocation
- * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
- * bios we would be blocking to the rescuer workqueue before
- * we retry with the original gfp_flags.
- */
-
- if (current->bio_list &&
- (!bio_list_empty(&current->bio_list[0]) ||
- !bio_list_empty(&current->bio_list[1])) &&
- bs->rescue_workqueue)
- gfp_mask &= ~__GFP_DIRECT_RECLAIM;
+ /* should not use nobvec bioset for nr_iovecs > 0 */
+ if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
+ return NULL;
+ /*
+ * submit_bio_noacct() converts recursion to iteration; this means if
+ * we're running beneath it, any bios we allocate and submit will not be
+ * submitted (and thus freed) until after we return.
+ *
+ * This exposes us to a potential deadlock if we allocate multiple bios
+ * from the same bio_set() while running underneath submit_bio_noacct().
+ * If we were to allocate multiple bios (say a stacking block driver
+ * that was splitting bios), we would deadlock if we exhausted the
+ * mempool's reserve.
+ *
+ * We solve this, and guarantee forward progress, with a rescuer
+ * workqueue per bio_set. If we go to allocate and there are bios on
+ * current->bio_list, we first try the allocation without
+ * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
+ * blocking to the rescuer workqueue before we retry with the original
+ * gfp_flags.
+ */
+ if (current->bio_list &&
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1])) &&
+ bs->rescue_workqueue)
+ gfp_mask &= ~__GFP_DIRECT_RECLAIM;
+
+ p = mempool_alloc(&bs->bio_pool, gfp_mask);
+ if (!p && gfp_mask != saved_gfp) {
+ punt_bios_to_rescuer(bs);
+ gfp_mask = saved_gfp;
p = mempool_alloc(&bs->bio_pool, gfp_mask);
- if (!p && gfp_mask != saved_gfp) {
- punt_bios_to_rescuer(bs);
- gfp_mask = saved_gfp;
- p = mempool_alloc(&bs->bio_pool, gfp_mask);
- }
-
- front_pad = bs->front_pad;
- inline_vecs = BIO_INLINE_VECS;
}
-
if (unlikely(!p))
return NULL;
- bio = p + front_pad;
- bio_init(bio, NULL, 0);
-
- if (nr_iovecs > inline_vecs) {
+ bio = p + bs->front_pad;
+ if (nr_iovecs > BIO_INLINE_VECS) {
unsigned long idx = 0;
+ struct bio_vec *bvl = NULL;
bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
if (!bvl && gfp_mask != saved_gfp) {
punt_bios_to_rescuer(bs);
gfp_mask = saved_gfp;
- bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
+ bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx,
+ &bs->bvec_pool);
}
if (unlikely(!bvl))
goto err_free;
- bio->bi_flags |= idx << BVEC_POOL_OFFSET;
+ bio_init(bio, bvl, bvec_nr_vecs(idx));
+ bio->bi_flags |= idx << BVEC_POOL_OFFSET;
} else if (nr_iovecs) {
- bvl = bio->bi_inline_vecs;
+ bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+ } else {
+ bio_init(bio, NULL, 0);
}
bio->bi_pool = bs;
- bio->bi_max_vecs = nr_iovecs;
- bio->bi_io_vec = bvl;
return bio;
err_free:
@@ -529,6 +508,31 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
}
EXPORT_SYMBOL(bio_alloc_bioset);
+/**
+ * bio_kmalloc - kmalloc a bio for I/O
+ * @gfp_mask: the GFP_* mask given to the slab allocator
+ * @nr_iovecs: number of iovecs to pre-allocate
+ *
+ * Use kmalloc to allocate and initialize a bio.
+ *
+ * Returns: Pointer to new bio on success, NULL on failure.
+ */
+struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+ struct bio *bio;
+
+ if (nr_iovecs > UIO_MAXIOV)
+ return NULL;
+
+ bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
+ if (unlikely(!bio))
+ return NULL;
+ bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+ bio->bi_pool = NULL;
+ return bio;
+}
+EXPORT_SYMBOL(bio_kmalloc);
+
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
unsigned long flags;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 23b7a73cd757..1c790e48dcef 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -390,6 +390,7 @@ extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs);
extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
@@ -402,11 +403,6 @@ static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}
-static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
-}
-
extern blk_qc_t submit_bio(struct bio *);
extern void bio_endio(struct bio *);
--
2.36.1