From: Boyuan Zhang <boyuan.zhang@amd.com>
Determine whether VCN is using the unified queue once in sw_init, instead of calling a helper function to check it later on.
v2: fix coding style
Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit ecfa23c8df7ef3ea2a429dfe039341bf792e95b4)
Modified for lack of amdgpu_ip_version() symbol.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 39 ++++++++++---------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  1 +
 2 files changed, 16 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 03b4bcfca196..199fb3c50de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -135,6 +135,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		}
 	}
 
+	/* from vcn4 and above, only unified queue is used */
+	adev->vcn.using_unified_queue =
+		adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0);
+
 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
@@ -259,18 +263,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
-/* from vcn4 and above, only unified queue is used */
-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool ret = false;
-
-	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
-		ret = true;
-
-	return ret;
-}
-
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
 	bool ret = false;
@@ -707,12 +699,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	uint32_t *ib_checksum;
 	uint32_t ib_pack_in_dw;
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -725,7 +716,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	ib->length_dw = 0;
 
 	/* single queue headers */
-	if (sq) {
+	if (adev->vcn.using_unified_queue) {
 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 						+ 4 + 2; /* engine info + decoding ib in dw */
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -744,7 +735,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
@@ -834,15 +825,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 				struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -856,7 +847,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 
 	ib->length_dw = 0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -878,7 +869,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
@@ -901,15 +892,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 				struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -923,7 +914,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 	ib->length_dw = 0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -945,7 +936,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index a3eed90b6af0..3dc2cffdae4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -284,6 +284,7 @@ struct amdgpu_vcn {
 
 	uint16_t inst_mask;
 	uint8_t num_inst_per_aid;
+	bool using_unified_queue;
 };
 
 struct amdgpu_fw_shared_rb_ptrs_struct {
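
The idea in isolation, for readers outside amdgpu: compute a device-wide property once at init time and cache it in the device struct, rather than re-deriving it from the IP version on every submission path. A minimal standalone C sketch of that pattern follows; all names are hypothetical, not the amdgpu API:

/* sketch.c: init-time capability caching (hypothetical names) */
#include <stdbool.h>
#include <stdio.h>

struct vcn_caps {
	unsigned int ip_major;     /* IP block major version, discovered once */
	bool using_unified_queue;  /* cached at init, read on hot paths */
};

/* init time: derive the flag once from the hardware version */
static void vcn_caps_init(struct vcn_caps *caps, unsigned int ip_major)
{
	caps->ip_major = ip_major;
	/* from vcn4 and above, only unified queue is used */
	caps->using_unified_queue = ip_major >= 4;
}

/* submission path: test the cached flag instead of recomputing it */
static unsigned int vcn_ib_size_dw(const struct vcn_caps *caps,
				   unsigned int base_dw)
{
	/* as in the patch, unified-queue IBs reserve 8 extra dwords */
	return caps->using_unified_queue ? base_dw + 8 : base_dw;
}

int main(void)
{
	struct vcn_caps caps;

	vcn_caps_init(&caps, 4);
	printf("ib size: %u dw\n", vcn_ib_size_dw(&caps, 16)); /* prints 24 */
	return 0;
}

The trade is one bool in the per-device struct in exchange for dropping a helper call and an IP-version comparison from each submission function.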