The SDMA s0ix save process requires turning off the SDMA ring buffer to avoid in-flight SDMA requests; otherwise, a page request from an in-flight SDMA ring access will cause an SDMA page fault at the SDMA restore phase.
Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2248
Cc: stable@vger.kernel.org # 6.0
Fixes: f8f4e2a51834 ("drm/amdgpu: skipping SDMA hw_init and hw_fini for S0ix.")
Signed-off-by: Prike Liang <Prike.Liang@amd.com>
---
v2: change the name sdma_v4_0_gfx_stop() to sdma_v4_0_gfx_enable() (Lijo)
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 1122bd4eae98..4d780e4430e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -907,13 +907,13 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
 }
 
 /**
- * sdma_v4_0_gfx_stop - stop the gfx async dma engines
+ * sdma_v4_0_gfx_enable - enable the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
- *
- * Stop the gfx async dma ring buffers (VEGA10).
+ * @enable: enable SDMA RB/IB
+ * control the gfx async dma ring buffers (VEGA10).
  */
-static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
 	u32 rb_cntl, ib_cntl;
 	int i;
@@ -922,10 +922,10 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
-		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
 		WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
 		ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
-		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 1 : 0);
 		WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
 	}
 }
@@ -1044,7 +1044,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
 	int i;
 
 	if (!enable) {
-		sdma_v4_0_gfx_stop(adev);
+		sdma_v4_0_gfx_enable(adev, enable);
 		sdma_v4_0_rlc_stop(adev);
 		if (adev->sdma.has_page_queue)
 			sdma_v4_0_page_stop(adev);
@@ -1960,8 +1960,10 @@ static int sdma_v4_0_suspend(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SMU saves SDMA state for us */
-	if (adev->in_s0ix)
+	if (adev->in_s0ix) {
+		sdma_v4_0_gfx_enable(adev, false);
 		return 0;
+	}
 
 	return sdma_v4_0_hw_fini(adev);
 }
@@ -1971,8 +1973,12 @@ static int sdma_v4_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SMU restores SDMA state for us */
-	if (adev->in_s0ix)
+	if (adev->in_s0ix) {
+		sdma_v4_0_enable(adev, true);
+		sdma_v4_0_gfx_enable(adev, true);
+		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 		return 0;
+	}
 
 	return sdma_v4_0_hw_init(adev);
 }
-- 
2.25.1
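[Editor's reading aid, not part of the patch: the following is a condensed, standalone C sketch of the s0ix ordering the change establishes — stop the gfx ring/indirect buffers before the SMU saves SDMA state, then on resume re-enable the engine, the ring buffers, and the TTM buffer-move path. The types and helper names below are simplified stand-ins for the real amdgpu structures, register macros, and TTM hook; only the call ordering mirrors sdma_v4_0_suspend()/sdma_v4_0_resume() in the diff above.]

/*
 * NOTE: simplified standalone model, not driver code.  Field and helper
 * names are stand-ins for the real amdgpu types and register macros;
 * only the s0ix call ordering mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct amdgpu_device_model {
	bool in_s0ix;      /* entering/leaving s2idle (s0ix) */
	bool rb_enabled;   /* stands in for SDMA0_GFX_RB_CNTL.RB_ENABLE */
	bool ib_enabled;   /* stands in for SDMA0_GFX_IB_CNTL.IB_ENABLE */
};

/* Stand-in for sdma_v4_0_gfx_enable(): toggle the gfx ring/indirect buffers. */
static void model_gfx_enable(struct amdgpu_device_model *adev, bool enable)
{
	adev->rb_enabled = enable;
	adev->ib_enabled = enable;
	printf("SDMA gfx RB/IB %s\n", enable ? "enabled" : "disabled");
}

/* s0ix suspend: the SMU saves SDMA state, but the ring buffer is turned
 * off first so nothing is in flight when that state is restored. */
static int model_suspend(struct amdgpu_device_model *adev)
{
	if (adev->in_s0ix) {
		model_gfx_enable(adev, false);
		return 0;
	}
	return 0;	/* non-s0ix path would do a full hw_fini */
}

/* s0ix resume: re-enable the engine and ring buffer, then hand SDMA back
 * to TTM for buffer moves (amdgpu_ttm_set_buffer_funcs_status in the patch). */
static int model_resume(struct amdgpu_device_model *adev)
{
	if (adev->in_s0ix) {
		model_gfx_enable(adev, true);
		printf("buffer funcs handed back to TTM\n");
		return 0;
	}
	return 0;	/* non-s0ix path would do a full hw_init */
}

int main(void)
{
	struct amdgpu_device_model adev = {
		.in_s0ix = true, .rb_enabled = true, .ib_enabled = true,
	};

	model_suspend(&adev);	/* ring off before the SMU saves SDMA state */
	model_resume(&adev);	/* ring back on after the SMU restores it */
	return 0;
}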
On Thu, Dec 1, 2022 at 2:56 AM Prike Liang <Prike.Liang@amd.com> wrote:
>
> The SDMA s0ix save process requires turning off the SDMA ring buffer to
> avoid in-flight SDMA requests; otherwise, a page request from an
> in-flight SDMA ring access will cause an SDMA page fault at the SDMA
> restore phase.
>
> Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2248
> Cc: stable@vger.kernel.org # 6.0
> Fixes: f8f4e2a51834 ("drm/amdgpu: skipping SDMA hw_init and hw_fini for S0ix.")
> Signed-off-by: Prike Liang <Prike.Liang@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
On 12/1/2022 07:39, Alex Deucher wrote:
> On Thu, Dec 1, 2022 at 2:56 AM Prike Liang <Prike.Liang@amd.com> wrote:
>>
>> The SDMA s0ix save process requires turning off the SDMA ring buffer to
>> avoid in-flight SDMA requests; otherwise, a page request from an
>> in-flight SDMA ring access will cause an SDMA page fault at the SDMA
>> restore phase.
>>
>> Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2248
>> Cc: stable@vger.kernel.org # 6.0
>> Fixes: f8f4e2a51834 ("drm/amdgpu: skipping SDMA hw_init and hw_fini for S0ix.")

I double checked and f8f4e2a51834 got backported to 5.15.y too as 960c8a55016b.
So this should be:

Cc: stable@vger.kernel.org # 5.15

>> Signed-off-by: Prike Liang <Prike.Liang@amd.com>
>
> Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

Tested-by: Mario Limonciello <mario.limonciello@amd.com>