Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 106c7d61 authored by Likun Gao's avatar Likun Gao Committed by Alex Deucher
Browse files

drm/amdgpu: abstract the function of enter/exit safe mode for RLC



Abstract the amdgpu_gfx_rlc_enter/exit_safe_mode functions and part of
rlc_init into common helpers to improve the reusability of the RLC code.

Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 88dfc9a3
Loading
Loading
Loading
Loading
+227 −2
Original line number Original line Diff line number Diff line

/*
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2008 Red Hat Inc.
@@ -23,11 +22,237 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_rlc.h"


/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Set RLC enter into safe mode if RLC is enabled and haven't in safe mode.
 */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = true;
	}
}

/**
 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Set RLC exit safe mode if RLC is enabled and have entered into safe mode.
 */
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
{
	if (!(adev->gfx.rlc.in_safe_mode))
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->unset_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = false;
	}
}

/**
 * amdgpu_gfx_rlc_init_sr - Init save restore block
 *
 * @adev: amdgpu_device pointer
 * @dws: size of the save restore block, in dwords
 *
 * Allocate the RLC save/restore buffer in VRAM and fill it with the
 * register list.  Assumes adev->gfx.rlc.reg_list/reg_list_size were
 * already set up by the IP-specific code (callers check reg_list for
 * NULL before calling — TODO confirm for every caller).
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 i;
	int r;

	/* allocate save restore block (dws dwords -> dws * 4 bytes),
	 * reserved and CPU-mapped on success */
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
		/* tear down any RLC BOs allocated so far */
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* write the sr buffer; entries are stored little endian */
	src_ptr = adev->gfx.rlc.reg_list;
	dst_ptr = adev->gfx.rlc.sr_ptr;
	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
	/* drop the CPU mapping and the reservation; the BO stays pinned */
	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_init_csb - Init clear state block
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the clear state block (CSB) in VRAM and fill it via the
 * IP-specific get_csb_buffer() callback; the size comes from the
 * get_csb_size() callback and is cached in clear_state_size.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	int r;

	/* allocate clear state block; size (in dwords) is ASIC specific */
	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.clear_state_obj,
				      &adev->gfx.rlc.clear_state_gpu_addr,
				      (void **)&adev->gfx.rlc.cs_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
		/* tear down any RLC BOs allocated so far */
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cs buffer */
	dst_ptr = adev->gfx.rlc.cs_ptr;
	adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
	amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
	/* NOTE(review): unlike the sr/cp-table helpers, the CSB is unpinned
	 * here, so it may be moved/evicted until something re-pins it —
	 * confirm the resume path re-pins the CSB before the RLC uses
	 * clear_state_gpu_addr */
	amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_init_cpt - Init cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the CP table buffer in VRAM (size taken from
 * adev->gfx.rlc.cp_table_size) and populate it with the CP firmware
 * jump tables via amdgpu_gfx_rlc_setup_cp_table().
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
	int ret;

	/* allocate the cp table BO, reserved and CPU-mapped on success */
	ret = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					&adev->gfx.rlc.cp_table_obj,
					&adev->gfx.rlc.cp_table_gpu_addr,
					(void **)&adev->gfx.rlc.cp_table_ptr);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to create cp table bo\n", ret);
		/* tear down any RLC BOs allocated so far */
		amdgpu_gfx_rlc_fini(adev);
		return ret;
	}

	/* fill the table, then drop the CPU mapping and reservation */
	amdgpu_gfx_rlc_setup_cp_table(adev);
	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_setup_cp_table - setup cp the buffer of cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Write cp firmware data into cp table.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else  if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i ++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

/**
/**
 * amdgpu_gfx_rlc_fini - Free BO which used for RLC
 * amdgpu_gfx_rlc_fini - Free BO which used for RLC
 *
 *
+21 −12
Original line number Original line Diff line number Diff line

/*
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 *
@@ -28,9 +27,13 @@
#include "clearstate_defs.h"
#include "clearstate_defs.h"


struct amdgpu_rlc_funcs {
struct amdgpu_rlc_funcs {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	bool (*is_rlc_enabled)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
	void (*set_safe_mode)(struct amdgpu_device *adev);
	void (*unset_safe_mode)(struct amdgpu_device *adev);
	int  (*init)(struct amdgpu_device *adev);
	int  (*init)(struct amdgpu_device *adev);
	u32  (*get_csb_size)(struct amdgpu_device *adev);
	void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
	int  (*get_cp_table_num)(struct amdgpu_device *adev);
	int  (*resume)(struct amdgpu_device *adev);
	int  (*resume)(struct amdgpu_device *adev);
	void (*stop)(struct amdgpu_device *adev);
	void (*stop)(struct amdgpu_device *adev);
	void (*reset)(struct amdgpu_device *adev);
	void (*reset)(struct amdgpu_device *adev);
@@ -84,6 +87,12 @@ struct amdgpu_rlc {
	bool is_rlc_v2_1;
	bool is_rlc_v2_1;
};
};


void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);


#endif
#endif
+3 −3
Original line number Original line Diff line number Diff line
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)


	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
		amdgpu_gfx_rlc_enter_safe_mode(adev);


		if (enable) {
		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				amdgpu_gfx_rlc_exit_safe_mode(adev);
				return ret;
				return ret;
			}
			}
		}
		}


		ci_do_enable_didt(adev, enable);
		ci_do_enable_didt(adev, enable);


		adev->gfx.rlc.funcs->exit_safe_mode(adev);
		amdgpu_gfx_rlc_exit_safe_mode(adev);
	}
	}


	return 0;
	return 0;
+4 −20
Original line number Original line Diff line number Diff line
@@ -2355,7 +2355,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
{
	const u32 *src_ptr;
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	volatile u32 *dst_ptr;
	u32 dws, i;
	u32 dws;
	u64 reg_list_mc_addr;
	u64 reg_list_mc_addr;
	const struct cs_section_def *cs_data;
	const struct cs_section_def *cs_data;
	int r;
	int r;
@@ -2370,28 +2370,12 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
	cs_data = adev->gfx.rlc.cs_data;
	cs_data = adev->gfx.rlc.cs_data;


	if (src_ptr) {
	if (src_ptr) {
		/* save restore block */
		/* init save restore block */
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
		r = amdgpu_gfx_rlc_init_sr(adev, dws);
					      AMDGPU_GEM_DOMAIN_VRAM,
		if (r)
					      &adev->gfx.rlc.save_restore_obj,
					      &adev->gfx.rlc.save_restore_gpu_addr,
					      (void **)&adev->gfx.rlc.sr_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
				 r);
			amdgpu_gfx_rlc_fini(adev);
			return r;
			return r;
	}
	}


		/* write the sr buffer */
		dst_ptr = adev->gfx.rlc.sr_ptr;
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			dst_ptr[i] = cpu_to_le32(src_ptr[i]);

		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
	}

	if (cs_data) {
	if (cs_data) {
		/* clear state block */
		/* clear state block */
		adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
		adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
+26 −122
Original line number Original line Diff line number Diff line
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =


static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);


@@ -3255,8 +3254,7 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
{
	const u32 *src_ptr;
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 dws;
	u32 dws, i;
	const struct cs_section_def *cs_data;
	const struct cs_section_def *cs_data;
	int r;
	int r;


@@ -3283,68 +3281,25 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
	cs_data = adev->gfx.rlc.cs_data;
	cs_data = adev->gfx.rlc.cs_data;


	if (src_ptr) {
	if (src_ptr) {
		/* save restore block */
		/* init save restore block */
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
		r = amdgpu_gfx_rlc_init_sr(adev, dws);
					      AMDGPU_GEM_DOMAIN_VRAM,
		if (r)
					      &adev->gfx.rlc.save_restore_obj,
					      &adev->gfx.rlc.save_restore_gpu_addr,
					      (void **)&adev->gfx.rlc.sr_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
			amdgpu_gfx_rlc_fini(adev);
			return r;
			return r;
	}
	}


		/* write the sr buffer */
		dst_ptr = adev->gfx.rlc.sr_ptr;
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
	}

	if (cs_data) {
	if (cs_data) {
		/* clear state block */
		/* init clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
		r = amdgpu_gfx_rlc_init_csb(adev);

		if (r)
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
			amdgpu_gfx_rlc_fini(adev);
			return r;
			return r;
	}
	}


		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->gfx.rlc.cp_table_size) {
	if (adev->gfx.rlc.cp_table_size) {

		r = amdgpu_gfx_rlc_init_cpt(adev);
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
		if (r)
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
			amdgpu_gfx_rlc_fini(adev);
			return r;
			return r;
	}
	}


		gfx_v7_0_init_cp_pg_table(adev);

		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	}

	return 0;
	return 0;
}
}


@@ -3423,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
	return orig;
	return orig;
}
}


static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	return true;
}

static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
{
{
	u32 tmp, i, mask;
	u32 tmp, i, mask;


@@ -3445,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
	}
	}
}
}


static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
{
{
	u32 tmp;
	u32 tmp;


@@ -3761,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
		WREG32(mmRLC_PG_CNTL, data);
		WREG32(mmRLC_PG_CNTL, data);
}
}


static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
{
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 4;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	if (adev->asic_type == CHIP_KAVERI)
	if (adev->asic_type == CHIP_KAVERI)
		max_me = 5;
		return 5;

	else
	if (adev->gfx.rlc.cp_table_ptr == NULL)
		return 4;
		return;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i ++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
}


static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
@@ -4265,9 +4165,13 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
};
};


static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
	.enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
	.is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
	.exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode,
	.set_safe_mode = gfx_v7_0_set_safe_mode,
	.unset_safe_mode = gfx_v7_0_unset_safe_mode,
	.init = gfx_v7_0_rlc_init,
	.init = gfx_v7_0_rlc_init,
	.get_csb_size = gfx_v7_0_get_csb_size,
	.get_csb_buffer = gfx_v7_0_get_csb_buffer,
	.get_cp_table_num = gfx_v7_0_cp_pg_table_num,
	.resume = gfx_v7_0_rlc_resume,
	.resume = gfx_v7_0_rlc_resume,
	.stop = gfx_v7_0_rlc_stop,
	.stop = gfx_v7_0_rlc_stop,
	.reset = gfx_v7_0_rlc_reset,
	.reset = gfx_v7_0_rlc_reset,
Loading