diff --git a/block/bio.c b/block/bio.c
index 7b13bdf72de0..8793f1ee559d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -843,6 +843,11 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 		bio_clone_blkg_association(bio, bio_src);
 	}
+	if (bio_flagged(bio_src, BIO_DMA_TOKEN)) {
+		bio->dma_token = bio_src->dma_token;
+		bio_set_flag(bio, BIO_DMA_TOKEN);
+	}
Historically __bio_clone itself does not clone the payload, just the bio. But we got rid of the callers that want to clone a bio but not the payload a long time ago.
I'd suggest a prep patch that moves the bi_io_vec assignment from bio_alloc_clone and bio_init_clone into __bio_clone; given that they are the same field, that'll take care of the dma token as well. Alternatively, do it in an if/else that the compiler will hopefully optimize away.
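For concreteness, here is a rough, untested sketch of the two variants; the surrounding __bio_clone body is elided, and whether the single assignment is enough depends on dma_token really overlaying bi_io_vec as implied above:

static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
	/* ... existing flag/ioprio/bi_iter copying unchanged ... */

	/*
	 * Variant 1 (prep patch): copy the payload pointer here instead
	 * of in bio_alloc_clone()/bio_init_clone().  If dma_token shares
	 * the field with bi_io_vec, this also carries the token.
	 */
	bio->bi_io_vec = bio_src->bi_io_vec;
	if (bio_flagged(bio_src, BIO_DMA_TOKEN))
		bio_set_flag(bio, BIO_DMA_TOKEN);

	/*
	 * Variant 2: if the fields turn out to be distinct, keep the
	 * copies separate in an if/else that the compiler can hopefully
	 * optimize away:
	 *
	 *	if (bio_flagged(bio_src, BIO_DMA_TOKEN)) {
	 *		bio->dma_token = bio_src->dma_token;
	 *		bio_set_flag(bio, BIO_DMA_TOKEN);
	 *	} else {
	 *		bio->bi_io_vec = bio_src->bi_io_vec;
	 *	}
	 */

	return 0;
}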
@@ -1349,6 +1366,10 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
 		bio_iov_bvec_set(bio, iter);
 		iov_iter_advance(iter, bio->bi_iter.bi_size);
 		return 0;
+	} else if (iov_iter_is_dma_token(iter)) {
No else after a return please.
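I.e. something like the following sketch, assuming the enclosing branch is the existing iov_iter_is_bvec() fast path (the dma-token body the patch adds is elided):

	if (iov_iter_is_bvec(iter)) {
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	if (iov_iter_is_dma_token(iter)) {
		/* ... dma-token setup added by this patch goes here ... */
	}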
+++ b/block/blk-merge.c
@@ -328,6 +328,29 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
 	unsigned nsegs = 0, bytes = 0, gaps = 0;
 	struct bvec_iter iter;
+	if (bio_flagged(bio, BIO_DMA_TOKEN)) {
Please split the dmabuf logic into a self-contained helper here (a rough sketch of one possible shape follows after the quoted hunk).
+		int offset = offset_in_page(bio->bi_iter.bi_bvec_done);
+		nsegs = ALIGN(bio->bi_iter.bi_size + offset, PAGE_SIZE);
+		nsegs >>= PAGE_SHIFT;
Why are we hardcoding PAGE_SIZE-based "segments" here?
+		if (offset & lim->dma_alignment || bytes & len_align_mask)
+			return -EINVAL;
+		if (bio->bi_iter.bi_size > max_bytes) {
+			bytes = max_bytes;
+			nsegs = (bytes + offset) >> PAGE_SHIFT;
+			goto split;
+		} else if (nsegs > lim->max_segments) {
No else after a goto either.
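Putting the helper suggestion and the two style points together, a purely illustrative, untested sketch could look like the below.  The helper name, its return convention, and the contents of the final branch are mine (the quoted hunk is cut off there); bytes is taken to mean the total bio size, and the PAGE_SIZE-based segment counting is kept only to mirror the hunk, so the question above still stands:

static int bio_split_dma_token_at(struct bio *bio,
		const struct queue_limits *lim, unsigned *segs,
		unsigned max_bytes, unsigned len_align_mask)
{
	unsigned offset = offset_in_page(bio->bi_iter.bi_bvec_done);
	unsigned bytes = bio->bi_iter.bi_size;
	unsigned nsegs = ALIGN(bytes + offset, PAGE_SIZE) >> PAGE_SHIFT;

	if (offset & lim->dma_alignment || bytes & len_align_mask)
		return -EINVAL;

	if (bytes > max_bytes) {
		/* split off as much as fits; the caller does the actual split */
		*segs = (max_bytes + offset) >> PAGE_SHIFT;
		return max_bytes;
	}

	if (nsegs > lim->max_segments) {
		/* placeholder: the quoted hunk ends before this branch */
		return -EINVAL;
	}

	*segs = nsegs;
	return 0;
}

bio_split_io_at() would then only need a single up-front call along the lines of "if (bio_flagged(bio, BIO_DMA_TOKEN)) return bio_split_dma_token_at(bio, lim, segs, max_bytes, len_align_mask);" instead of carrying the dmabuf logic inline.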