4.17-stable review patch. If anyone has any objections, please let me know.
------------------
From: Ben Skeggs <bskeggs@redhat.com>
[ Upstream commit 19ca10d82e33bcfe92412c461fc3534ec1e14747 ]
We previously only did this for push buffers, but an upcoming patch will need to attach fences to all VMAs to resolve another issue.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/gpu/drm/nouveau/nouveau_dma.c | 10 +---------
 drivers/gpu/drm/nouveau/nouveau_dma.h |  5 ++---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 19 ++++++++++++++++---
 3 files changed, 19 insertions(+), 15 deletions(-)
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -80,18 +80,10 @@ READ_GET(struct nouveau_channel *chan, u
 }
 
 void
-nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
-	      int delta, int length)
+nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
 {
-	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_bo *pb = chan->push.buffer;
-	struct nouveau_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
-	u64 offset;
-
-	vma = nouveau_vma_find(bo, &cli->vmm);
-	BUG_ON(!vma);
-	offset = vma->addr + delta;
 
 	BUG_ON(chan->dma.ib_free < 1);
 
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,8 +31,7 @@
 #include "nouveau_chan.h"
 
 int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
-		   int delta, int length);
+void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
 
 /*
  * There's a hw race condition where you can't jump to your PUT offset,
@@ -151,7 +150,7 @@ FIRE_RING(struct nouveau_channel *chan)
 	chan->accel_done = true;
 
 	if (chan->dma.ib_max) {
-		nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2,
+		nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
 			      (chan->dma.cur - chan->dma.put) << 2);
 	} else {
 		WRITE_PUT(chan->dma.cur);
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -432,7 +432,20 @@ retry:
 			}
 		}
 
-		b->user_priv = (uint64_t)(unsigned long)nvbo;
+		if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+			struct nouveau_vmm *vmm = &cli->vmm;
+			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
+			if (!vma) {
+				NV_PRINTK(err, cli, "vma not found!\n");
+				ret = -EINVAL;
+				break;
+			}
+
+			b->user_priv = (uint64_t)(unsigned long)vma;
+		} else {
+			b->user_priv = (uint64_t)(unsigned long)nvbo;
+		}
+
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -763,10 +776,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_dev
 		}
 
 		for (i = 0; i < req->nr_push; i++) {
-			struct nouveau_bo *nvbo = (void *)(unsigned long)
+			struct nouveau_vma *vma = (void *)(unsigned long)
 				bo[push[i].bo_index].user_priv;
 
-			nv50_dma_push(chan, nvbo, push[i].offset,
+			nv50_dma_push(chan, vma->addr + push[i].offset,
 				      push[i].length);
 		}
 	} else
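
Reviewer note, not part of the patch: the shape of the change is that validate_init() now looks up each buffer's VMA in the client's VMM once, at validate time, and stashes the VMA pointer (instead of the nouveau_bo pointer) in the opaque 64-bit user_priv cookie, so the pushbuf submit loop can hand vma->addr plus the push offset straight to nv50_dma_push() rather than re-running nouveau_vma_find() per push. A minimal user-space sketch of that cookie pattern, with all names hypothetical (this is not nouveau code):

/* Hypothetical stand-ins for the nouveau structures; illustration only. */
#include <stdint.h>
#include <stdio.h>

struct mapping {		/* plays the role of a per-VMM VMA */
	uint64_t addr;		/* base address of the mapping */
};

struct buffer {			/* plays the role of a buffer object */
	struct mapping map;
	uint64_t user_priv;	/* opaque cookie carried through validation */
};

/* "validate" step: resolve the mapping once and remember it in the cookie. */
static void validate(struct buffer *b)
{
	b->user_priv = (uint64_t)(unsigned long)&b->map;
}

/* "submit" step: no lookup here, just mapping base + push offset. */
static void push(uint64_t cookie, uint64_t offset, int length)
{
	struct mapping *m = (void *)(unsigned long)cookie;

	printf("push addr=0x%llx len=%d\n",
	       (unsigned long long)(m->addr + offset), length);
}

int main(void)
{
	struct buffer b = { .map = { .addr = 0x1000000 } };

	validate(&b);
	push(b.user_priv, 0x80, 256);
	return 0;
}

The double cast mirrors how the driver packs a kernel pointer into the 64-bit user_priv field of the pushbuf ioctl's per-buffer entry.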