Hi all,

Today's linux-next merge of the drm tree got conflicts in:

  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c

between commits:

  3eb46fbb601f9 ("drm/amdgpu/gfx11: adjust KGQ reset sequence")
  dfd64f6e8cd7b ("drm/amdgpu/gfx12: adjust KGQ reset sequence")

from the origin tree and commits:

  b340ff216fdab ("drm/amdgpu/gfx11: adjust KGQ reset sequence")
  0a6d6ed694d72 ("drm/amdgpu/gfx12: adjust KGQ reset sequence")

from the drm tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging. You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
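
For anyone auditing the fix-up below: the same "adjust KGQ reset sequence"
patches landed in both trees under different hashes, and one side also
widened several MES helper signatures with a trailing instance argument
(passed as 0 at every legacy call site), which is what most of the resolved
hunks thread through. A minimal standalone sketch of that call-site pattern
follows; the stub types and function body are hypothetical stand-ins, not
the real amdgpu definitions, and only the call shape mirrors the diff:

#include <stdio.h>

/* Illustrative stand-ins only, not the real amdgpu structures. */
struct amdgpu_device { int num_mes; };
struct amdgpu_ring { int idx; };

/*
 * Old shape (one parent of the merge):
 *     int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
 *                                     struct amdgpu_ring *ring);
 *
 * New shape takes a trailing MES instance id; the resolved call sites
 * in the diff all pass 0 for the single-instance case.
 */
static int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
                                       struct amdgpu_ring *ring,
                                       int mes_inst)
{
	(void)adev;
	printf("mapping legacy queue for ring %d on MES instance %d\n",
	       ring->idx, mes_inst);
	return 0;
}

int main(void)
{
	struct amdgpu_device adev = { .num_mes = 1 };
	struct amdgpu_ring ring = { .idx = 0 };

	/* Resolved call site: keep one tree's reset-sequence context,
	 * take the other tree's extra trailing argument (0). */
	return amdgpu_mes_map_legacy_queue(&adev, &ring, 0);
}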
diff --combined drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index e642236ea2c51,427975b5a1d97..0000000000000
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@@ -120,6 -120,10 +120,10 @@@ MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.b
MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_5_4_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_5_4_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_5_4_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_5_4_rlc.bin");
static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
@@@ -416,7 -420,8 +420,8 @@@ static void gfx11_kiq_unmap_queues(stru
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
- amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
+ amdgpu_mes_unmap_legacy_queue(adev, ring, action,
+ gpu_addr, seq, 0);
return;
}
@@@ -566,8 -571,8 +571,8 @@@ static int gfx_v11_0_ring_test_ring(str
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 5);
if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ drm_err(adev_to_drm(adev), "cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
return r;
}
@@@ -623,7 -628,7 +628,7 @@@ static int gfx_v11_0_ring_test_ib(struc
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r);
goto err1;
}
@@@ -917,7 -922,7 +922,7 @@@ static int gfx_v11_0_rlc_init(struct am
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0, NULL, 0xf);
return 0;
}
@@@ -1052,10 -1057,14 +1057,14 @@@ static void gfx_v11_0_select_me_pipe_q(
static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
struct amdgpu_gfx_shadow_info *shadow_info)
{
+ /* for gfx */
shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ /* for compute */
+ shadow_info->eop_size = GFX11_MEC_HPD_SIZE;
+ shadow_info->eop_alignment = 256;
}
static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
@@@ -1080,6 -1089,7 +1089,7 @@@ static const struct amdgpu_gfx_funcs gf
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
+ .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@@ -1107,6 -1117,7 +1117,7 @@@
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 5, 3):
+ case IP_VERSION(11, 5, 4):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@@ -1589,6 -1600,7 +1600,7 @@@ static int gfx_v11_0_sw_init(struct amd
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 5, 3):
+ case IP_VERSION(11, 5, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 2;
@@@ -3046,7 -3058,8 +3058,8 @@@ static int gfx_v11_0_wait_for_rlc_autol
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 4))
bootload_status = RREG32_SOC15(GC, 0,
regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
else
@@@ -3617,7 -3630,7 +3630,7 @@@ static int gfx_v11_0_cp_gfx_start(struc
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ drm_err(&adev->ddev, "cp failed to lock ring (%d).\n", r);
return r;
}
@@@ -3662,7 -3675,7 +3675,7 @@@
ring = &adev->gfx.gfx_ring[1];
r = amdgpu_ring_alloc(ring, 2);
if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r);
return r;
}
@@@ -4593,7 -4606,7 +4606,7 @@@ static int gfx_v11_0_cp_resume(struct a
}
if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
- r = amdgpu_mes_kiq_hw_init(adev);
+ r = amdgpu_mes_kiq_hw_init(adev, 0);
else
r = gfx_v11_0_kiq_resume(adev);
if (r)
@@@ -4783,7 -4796,7 +4796,7 @@@ static int gfx_v11_0_hw_init(struct amd
adev->gfx.is_poweron = true;
if(get_gb_addr_config(adev))
- DRM_WARN("Invalid gb_addr_config !\n");
+ drm_warn(adev_to_drm(adev), "Invalid gb_addr_config !\n");
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
adev->gfx.rs64_enable)
@@@ -4901,7 -4914,7 +4914,7 @@@ static int gfx_v11_0_hw_fini(struct amd
if (amdgpu_gfx_disable_kcq(adev, 0))
DRM_ERROR("KCQ disable failed\n");
- amdgpu_mes_kiq_hw_fini(adev);
+ amdgpu_mes_kiq_hw_fini(adev, 0);
}
if (amdgpu_sriov_vf(adev))
@@@ -5568,7 -5581,8 +5581,8 @@@ static int gfx_v11_0_update_gfx_clock_g
return 0;
}
- static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
+ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, int xcc_id,
+ struct amdgpu_ring *ring, unsigned vmid)
{
u32 reg, pre_data, data;
@@@ -5633,6 -5647,7 +5647,7 @@@ static void gfx_v11_cntl_power_gating(s
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 5, 3):
+ case IP_VERSION(11, 5, 4):
WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
break;
default:
@@@ -5671,6 -5686,7 +5686,7 @@@ static int gfx_v11_0_set_powergating_st
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 5, 3):
+ case IP_VERSION(11, 5, 4):
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
@@@ -5705,6 -5721,7 +5721,7 @@@ static int gfx_v11_0_set_clockgating_st
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
case IP_VERSION(11, 5, 3):
+ case IP_VERSION(11, 5, 4):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@@ -5831,25 -5848,13 +5848,13 @@@ static void gfx_v11_0_ring_emit_hdp_flu
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- switch (ring->me) {
- case 1:
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
- break;
- case 2:
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
- break;
- default:
- return;
- }
- reg_mem_engine = 0;
- } else {
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
- reg_mem_engine = 1; /* pfp */
+ if (!adev->gfx.funcs->get_hdp_flush_mask) {
+ dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+ return;
}
+ adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
@@@ -6664,7 -6669,7 +6669,7 @@@ static int gfx_v11_0_bad_op_irq(struct
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- DRM_ERROR("Illegal opcode in command stream \n");
+ DRM_ERROR("Illegal opcode in command stream\n");
gfx_v11_0_handle_priv_fault(adev, entry);
return 0;
}
@@@ -6828,7 -6833,7 +6833,7 @@@ static int gfx_v11_0_reset_kgq(struct a
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio, 0);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
@@@ -6844,7 -6849,7 +6849,7 @@@
return r;
}
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
if (r) {
dev_err(adev->dev, "failed to remap kgq\n");
return r;
@@@ -6993,7 -6998,7 +6998,7 @@@ static int gfx_v11_0_reset_kcq(struct a
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true, 0);
if (r) {
dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
r = gfx_v11_0_reset_compute_pipe(ring);
@@@ -7006,7 -7011,7 +7011,7 @@@
dev_err(adev->dev, "fail to init kcq\n");
return r;
}
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
if (r) {
dev_err(adev->dev, "failed to remap kcq\n");
return r;
@@@ -7480,7 -7485,7 +7485,7 @@@ static int gfx_v11_0_get_cu_info(struc
if (!adev || !cu_info)
return -EINVAL;
- amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
+ amdgpu_gfx_parse_disable_cu(adev, disable_masks, 8, 2);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
diff --combined drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 4aab89a9ab401,79ea1af363a53..0000000000000
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@@ -355,7 -355,8 +355,8 @@@ static void gfx_v12_0_kiq_unmap_queues(
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
- amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
+ amdgpu_mes_unmap_legacy_queue(adev, ring, action,
+ gpu_addr, seq, 0);
return;
}
@@@ -458,8 -459,8 +459,8 @@@ static int gfx_v12_0_ring_test_ring(str
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 5);
if (r) {
- dev_err(adev->dev,
- "amdgpu: cp failed to lock ring %d (%d).\n",
+ drm_err(adev_to_drm(adev),
+ "cp failed to lock ring %d (%d).\n",
ring->idx, r);
return r;
}
@@@ -516,7 -517,7 +517,7 @@@ static int gfx_v12_0_ring_test_ib(struc
r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
- dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
+ drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r);
goto err1;
}
@@@ -760,7 -761,7 +761,7 @@@ static int gfx_v12_0_rlc_init(struct am
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0, NULL, 0xf);
return 0;
}
@@@ -908,10 -909,14 +909,14 @@@ static void gfx_v12_0_select_me_pipe_q(
static void gfx_v12_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
struct amdgpu_gfx_shadow_info *shadow_info)
{
+ /* for gfx */
shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ /* for compute */
+ shadow_info->eop_size = GFX12_MEC_HPD_SIZE;
+ shadow_info->eop_alignment = 256;
}
static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
@@@ -936,6 -941,7 +941,7 @@@ static const struct amdgpu_gfx_funcs gf
.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
.get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
+ .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
};
static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@@ -3469,7 -3475,7 +3475,7 @@@ static int gfx_v12_0_cp_resume(struct a
}
if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
- r = amdgpu_mes_kiq_hw_init(adev);
+ r = amdgpu_mes_kiq_hw_init(adev, 0);
else
r = gfx_v12_0_kiq_resume(adev);
if (r)
@@@ -3650,7 -3656,7 +3656,7 @@@ static int gfx_v12_0_hw_init(struct amd
adev->gfx.is_poweron = true;
if (get_gb_addr_config(adev))
- DRM_WARN("Invalid gb_addr_config !\n");
+ drm_warn(adev_to_drm(adev), "Invalid gb_addr_config !\n");
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
gfx_v12_0_config_gfx_rs64(adev);
@@@ -3758,7 -3764,7 +3764,7 @@@ static int gfx_v12_0_hw_fini(struct amd
if (amdgpu_gfx_disable_kcq(adev, 0))
DRM_ERROR("KCQ disable failed\n");
- amdgpu_mes_kiq_hw_fini(adev);
+ amdgpu_mes_kiq_hw_fini(adev, 0);
}
if (amdgpu_sriov_vf(adev)) {
@@@ -3955,6 -3961,7 +3961,7 @@@ static void gfx_v12_0_update_perf_clk(s
}
static void gfx_v12_0_update_spm_vmid(struct amdgpu_device *adev,
+ int xcc_id,
struct amdgpu_ring *ring,
unsigned vmid)
{
@@@ -4386,25 -4393,13 +4393,13 @@@ static void gfx_v12_0_ring_emit_hdp_flu
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- switch (ring->me) {
- case 1:
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
- break;
- case 2:
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
- break;
- default:
- return;
- }
- reg_mem_engine = 0;
- } else {
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
- reg_mem_engine = 1; /* pfp */
+ if (!adev->gfx.funcs->get_hdp_flush_mask) {
+ dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
+ return;
}
+ adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
@@@ -5040,7 -5035,7 +5035,7 @@@ static int gfx_v12_0_bad_op_irq(struct
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- DRM_ERROR("Illegal opcode in command stream \n");
+ DRM_ERROR("Illegal opcode in command stream\n");
gfx_v12_0_handle_priv_fault(adev, entry);
return 0;
}
@@@ -5302,7 -5297,7 +5297,7 @@@ static int gfx_v12_0_reset_kgq(struct a
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio, 0);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
r = gfx_v12_reset_gfx_pipe(ring);
@@@ -5317,7 -5312,7 +5312,7 @@@
return r;
}
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
if (r) {
dev_err(adev->dev, "failed to remap kgq\n");
return r;
@@@ -5419,7 -5414,7 +5414,7 @@@ static int gfx_v12_0_reset_kcq(struct a
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true, 0);
if (r) {
dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
r = gfx_v12_0_reset_compute_pipe(ring);
@@@ -5432,7 -5427,7 +5427,7 @@@
dev_err(adev->dev, "failed to init kcq\n");
return r;
}
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
if (r) {
dev_err(adev->dev, "failed to remap kcq\n");
return r;
@@@ -5724,7 -5719,7 +5719,7 @@@ static int gfx_v12_0_get_cu_info(struc
if (!adev || !cu_info)
return -EINVAL;
- amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
+ amdgpu_gfx_parse_disable_cu(adev, disable_masks, 8, 2);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {