From: Adi Suresh <adisuresh@google.com>
commit db96c2cb4870173ea9b08df130f1d1cc9b5dd53d upstream.
The previous commit had a bug where the last page in the memory range could not be synced. This change fixes the behavior so that all the required pages are synced.
Fixes: 9cfeeb576d49 ("gve: Fixes DMA synchronization")
Signed-off-by: Adi Suresh <adisuresh@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/net/ethernet/google/gve/gve_tx.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union g
 static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
 				    u64 iov_offset, u64 iov_len)
 {
+	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
+	u64 first_page = iov_offset / PAGE_SIZE;
 	dma_addr_t dma;
-	u64 addr;
+	u64 page;
 
-	for (addr = iov_offset; addr < iov_offset + iov_len;
-	     addr += PAGE_SIZE) {
-		dma = page_buses[addr / PAGE_SIZE];
+	for (page = first_page; page <= last_page; page++) {
+		dma = page_buses[page];
 		dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 	}
 }
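[Editor's note: the following standalone sketch is not part of the patch. It illustrates why the old
address-stepping loop could skip the final page when the buffer is not page-aligned, and why iterating
page indices from first_page to last_page inclusive covers the whole range. It assumes a 4096-byte
page size and example values for iov_offset and iov_len; these are illustrative choices, not values
taken from the driver.]

/* sketch.c: compare page coverage of the old and new loop shapes */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096ULL	/* stand-in for PAGE_SIZE */

int main(void)
{
	uint64_t iov_offset = 4000;	/* starts near the end of page 0 */
	uint64_t iov_len = 200;		/* spills 104 bytes into page 1 */
	uint64_t addr, page;

	/*
	 * Old loop: steps the address by the page size, so 4000 -> 8096,
	 * which is already past iov_offset + iov_len = 4200; the loop
	 * exits after touching only page 0 and page 1 is never synced.
	 */
	printf("old loop covers pages:");
	for (addr = iov_offset; addr < iov_offset + iov_len;
	     addr += EXAMPLE_PAGE_SIZE)
		printf(" %llu", (unsigned long long)(addr / EXAMPLE_PAGE_SIZE));
	printf("\n");

	/*
	 * New loop: computes the first and last page indices touched by the
	 * range and walks them inclusively, so pages 0 and 1 are both covered.
	 */
	uint64_t first_page = iov_offset / EXAMPLE_PAGE_SIZE;
	uint64_t last_page = (iov_offset + iov_len - 1) / EXAMPLE_PAGE_SIZE;

	printf("new loop covers pages:");
	for (page = first_page; page <= last_page; page++)
		printf(" %llu", (unsigned long long)page);
	printf("\n");

	return 0;
}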