Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2d2faaf0 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'hns3-add-support-for-reset'



Lipeng says:

====================
net: hns3: add support for reset

There are 4 reset types for the HNS3 PF driver: global reset,
core reset, IMP reset, and PF reset. The core reset resets all datapaths
of all functions except the IMP, MAC and PCI interface. Global reset is
equal to the core reset plus all MAC resets. IMP reset is caused by
watchdog timer expiration and has the same scope as the core reset.
PF reset resets the whole physical function.

This patchset adds reset support for the hns3 driver and fixes some related bugs.

---
Change log:
V1 -> V2:
1, fix some comments from Yunsheng Lin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9691cea9 c6dc5213
Loading
Loading
Loading
Loading
+19 −0
Original line number Diff line number Diff line
@@ -110,6 +110,21 @@ enum hnae3_media_type {
	HNAE3_MEDIA_TYPE_BACKPLANE,
};

/* Notification stages delivered to a client via reset_notify() during a
 * reset sequence (see hclge_notify_client() / hclge_reset_subtask()).
 */
enum hnae3_reset_notify_type {
	HNAE3_UP_CLIENT,	/* bring the client's datapath back up */
	HNAE3_DOWN_CLIENT,	/* quiesce the client before the reset */
	HNAE3_INIT_CLIENT,	/* re-initialize client state after the reset */
	HNAE3_UNINIT_CLIENT,	/* tear down client state prior to re-init */
};

/* Supported reset scopes. Per the commit log: FUNC resets one PF;
 * CORE resets the datapath of all functions except IMP/MAC/PCI;
 * GLOBAL is core plus all MAC resets; IMP is triggered by the
 * management processor's watchdog (same scope as core).
 */
enum hnae3_reset_type {
	HNAE3_FUNC_RESET,
	HNAE3_CORE_RESET,
	HNAE3_GLOBAL_RESET,
	HNAE3_IMP_RESET,
	HNAE3_NONE_RESET,	/* sentinel: no reset pending */
};

struct hnae3_vector_info {
	u8 __iomem *io_addr;
	int vector;
@@ -133,6 +148,8 @@ struct hnae3_client_ops {
	void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
	void (*link_status_change)(struct hnae3_handle *handle, bool state);
	int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
	int (*reset_notify)(struct hnae3_handle *handle,
			    enum hnae3_reset_notify_type type);
};

#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -367,6 +384,8 @@ struct hnae3_ae_ops {
			       u16 vlan_id, bool is_kill);
	int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
				  u16 vlan, u8 qos, __be16 proto);
	void (*reset_event)(struct hnae3_handle *handle,
			    enum hnae3_reset_type reset);
};

struct hnae3_dcb_ops {
+24 −15
Original line number Diff line number Diff line
@@ -62,7 +62,7 @@ static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
	ring->desc = NULL;
}

static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type)
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
@@ -79,9 +79,6 @@ static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type)
		return ret;
	}

	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	return 0;
}

@@ -302,37 +299,52 @@ static enum hclge_cmd_status hclge_cmd_query_firmware_version(
	return ret;
}

int hclge_cmd_init(struct hclge_dev *hdev)
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	u32 version;
	int ret;

	/* Setup the queue entries for use cmd queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

int hclge_cmd_init(struct hclge_dev *hdev)
{
	u32 version;
	int ret;

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	hclge_cmd_init_regs(&hdev->hw);

	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
@@ -346,9 +358,6 @@ int hclge_cmd_init(struct hclge_dev *hdev)
	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
+13 −0
Original line number Diff line number Diff line
@@ -63,6 +63,11 @@ enum hclge_cmd_status {
	HCLGE_ERR_CSQ_ERROR	= -3,
};

/* Misc (vector 0) interrupt resources: the MMIO address written by
 * hclge_enable_vector() to gate the vector, plus its Linux IRQ number.
 */
struct hclge_misc_vector {
	u8 __iomem *addr;	/* vector enable register (MMIO) */
	int vector_irq;		/* Linux IRQ number for vector 0 */
};

struct hclge_cmq {
	struct hclge_cmq_ring csq;
	struct hclge_cmq_ring crq;
@@ -692,6 +697,13 @@ struct hclge_reset_tqp_queue_cmd {
	u8 rsv[20];
};

/* Bit positions within hclge_reset_cmd.mac_func_reset */
#define HCLGE_CFG_RESET_MAC_B		3
#define HCLGE_CFG_RESET_FUNC_B		7
/* Firmware command payload for HCLGE_OPC_CFG_RST_TRIGGER (24 bytes). */
struct hclge_reset_cmd {
	u8 mac_func_reset;	/* MAC / function reset request bits */
	u8 fun_reset_vfid;	/* function id to reset (0 = the PF) */
	u8 rsv[22];		/* reserved, pads to descriptor data size */
};
#define HCLGE_DEFAULT_TX_BUF		0x4000	 /* 16k  bytes */
#define HCLGE_TOTAL_PKT_BUF		0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV		0xA000	 /* 40k byte */
@@ -750,4 +762,5 @@ enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
					  struct hclge_desc *desc);

void hclge_destroy_cmd_queue(struct hclge_hw *hw);
int hclge_cmd_queue_init(struct hclge_dev *hdev);
#endif
+431 −19
Original line number Diff line number Diff line
@@ -35,6 +35,7 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

@@ -1184,11 +1185,7 @@ static int hclge_assign_tqp(struct hclge_vport *vport,
			     struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced, func_id, ret;
	bool is_pf;

	func_id = vport->vport_id;
	is_pf = (vport->vport_id == 0) ? true : false;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
@@ -1197,12 +1194,6 @@ static int hclge_assign_tqp(struct hclge_vport *vport,
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			ret = hclge_map_tqps_to_func(hdev, func_id,
						     hdev->htqp[i].index,
						     alloced, is_pf);
			if (ret)
				return ret;

			alloced++;
		}
	}
@@ -1254,6 +1245,49 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
	return 0;
}

/* Bind every TQP owned by @vport to its function in hardware.
 * Returns 0 on success or the first error from the mapping command.
 */
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	/* vport 0 is the PF itself; everything else is VMDq/VF */
	bool is_pf = !vport->vport_id;
	u16 idx;

	for (idx = 0; idx < kinfo->num_tqps; idx++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[idx], struct hclge_tqp, q);
		int ret = hclge_map_tqps_to_func(hdev, vport->vport_id,
						 tqp->index, idx, is_pf);

		if (ret)
			return ret;
	}

	return 0;
}

/* Map queue pairs for every vport: the PF vport, all VMDq vports and
 * all requested VF vports. Stops at the first mapping failure.
 */
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	u16 total = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	u16 idx;

	for (idx = 0; idx < total; idx++) {
		int ret = hclge_map_tqp_to_vport(hdev, &hdev->vport[idx]);

		if (ret)
			return ret;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
@@ -2195,13 +2229,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)

	mac->link = 0;

	ret = hclge_mac_mdio_config(hdev);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "mdio config fail ret=%d\n", ret);
		return ret;
	}

	/* Initialize the MTA table work mode */
	hdev->accept_mta_mc	= true;
	hdev->enable_mta	= true;
@@ -2366,11 +2393,275 @@ static void hclge_service_complete(struct hclge_dev *hdev)
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

/* Gate the misc interrupt vector: write 1 to its MMIO enable register
 * to enable it, 0 to mask it.
 */
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

/* Misc (vector 0) interrupt handler.
 *
 * Masks the vector first so the interrupt does not refire, then kicks
 * the service task; the vector is re-enabled at the end of
 * hclge_misc_irq_service_task(). The SERVICE_SCHED bit guards against
 * scheduling the work twice.
 */
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;

	hclge_enable_vector(&hdev->misc_vector, false);
	if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);

	return IRQ_HANDLED;
}

/* Release MSI vector @vector_id back to the free pool and update the
 * used/left accounting.
 */
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left++;
	hdev->num_msi_used--;
}

/* Reserve MSI vector 0 as the misc vector: record its Linux IRQ, the
 * MMIO address of its enable register, mark slot 0 as taken and adjust
 * the used/left accounting.
 */
static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

/* Claim vector 0 and request its IRQ (device-managed, so it is freed
 * automatically on driver detach). On request failure the vector is
 * returned to the pool before propagating the error.
 */
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	ret = devm_request_irq(&hdev->pdev->dev,
			       hdev->misc_vector.vector_irq,
			       hclge_misc_irq_handle, 0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

/* Deliver a reset-notification stage to the nic client for the PF vport
 * and every VMDq vport (indices 0..num_vmdq_vport). Returns -EOPNOTSUPP
 * if the client does not implement reset_notify, or the first non-zero
 * callback result.
 *
 * NOTE(review): hdev->nic_client is dereferenced without a NULL check —
 * confirm a client is always registered before resets can be processed.
 */
static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret)
			return ret;
	}

	return 0;
}

/* Poll until the hardware reports the pending reset has completed.
 *
 * Selects the status register/bit for the reset type recorded in
 * hdev->reset_type and polls it up to HCLGE_RESET_WAIT_CNT times,
 * sleeping HCLGE_RESET_WAIT_MS between reads. Regardless of outcome,
 * the misc reset-status register is cleared so the same reset is not
 * detected again as a fresh interrupt event.
 *
 * Returns 0 when the reset completed, -EINVAL for an unsupported reset
 * type, -EBUSY on timeout.
 *
 * Fix: local macro was misspelled HCLGE_RESET_WATI_MS; renamed to
 * HCLGE_RESET_WAIT_MS (defined and used only within this function).
 */
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	/* must clear reset status register to
	 * prevent driver detect reset interrupt again
	 */
	reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

/* Send the firmware command that triggers a function-level reset for
 * @func_id (0 = the PF). The MAC-reset bit is explicitly cleared and
 * only the function-reset bit is set in the request payload.
 * Returns the firmware command status (0 on success).
 */
static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

/* Kick off a hardware reset of the requested @type.
 *
 * Global and core resets are requested by setting the corresponding bit
 * in the global reset register (read-modify-write); a PF reset goes
 * through the firmware command path. Completion is observed later via
 * hclge_reset_wait(). Errors from the function-reset command are logged
 * inside hclge_func_reset_cmd() and not propagated here.
 */
static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* func_id 0 targets this PF; errors are logged by callee */
		hclge_func_reset_cmd(hdev, 0);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", type);
		break;
	}
}

/* Decode which reset the hardware has signalled from the misc reset
 * status register. Checked in priority order: global, then core, then
 * IMP. Returns HNAE3_NONE_RESET when no reset bit is set.
 */
static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	u32 rst_reg_val;

	rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
		rst_level = HNAE3_GLOBAL_RESET;
	else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
		rst_level = HNAE3_CORE_RESET;
	else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
		rst_level = HNAE3_IMP_RESET;

	return rst_level;
}

/* ae_ops->reset_event callback: a client (stack) requests a reset.
 *
 * Records the requested reset type and schedules the service task,
 * which performs the actual reset in hclge_reset_subtask(). The
 * HCLGE_STATE_RESET_INT bit rejects a new request while a reset is
 * already in flight. IMP/unknown types are not accepted from clients.
 *
 * Fix: kernel log messages must be newline-terminated; the original
 * strings were missing '\n' (and had a stray space before a comma),
 * which delays/merges console output.
 */
static void hclge_reset_event(struct hnae3_handle *handle,
			      enum hnae3_reset_type reset)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	dev_info(&hdev->pdev->dev,
		 "Receive reset event, reset_type is %d\n", reset);

	switch (reset) {
	case HNAE3_FUNC_RESET:
	case HNAE3_CORE_RESET:
	case HNAE3_GLOBAL_RESET:
		/* only one reset may be in flight at a time */
		if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
			dev_err(&hdev->pdev->dev, "Already in reset state\n");
			return;
		}
		hdev->reset_type = reset;
		set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
		set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->service_task);
		break;
	default:
		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d\n",
			 reset);
		break;
	}
}

/* Service-task reset worker.
 *
 * Two reset origins converge here: a stack-requested reset (reset_type
 * already set by hclge_reset_event(), do_reset == true, so we must
 * trigger the reset ourselves) and a hardware-detected reset (reset_type
 * still NONE, decoded from the misc status register, reset already in
 * progress so we only mark the state bit and wait).
 *
 * Sequence: stop clients (DOWN), trigger/acknowledge the reset, wait
 * for completion, then uninit + re-init the ae_dev and clients, and
 * finally bring clients back UP.
 *
 * NOTE(review): HNAE3_UP_CLIENT is sent even when hclge_reset_wait()
 * times out — confirm bringing the datapath up on a failed reset is
 * intended.
 */
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	bool do_reset;

	do_reset = hdev->reset_type != HNAE3_NONE_RESET;

	/* Reset is detected by interrupt */
	if (hdev->reset_type == HNAE3_NONE_RESET)
		hdev->reset_type = hclge_detected_reset_event(hdev);

	if (hdev->reset_type == HNAE3_NONE_RESET)
		return;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
	case HNAE3_CORE_RESET:
	case HNAE3_GLOBAL_RESET:
	case HNAE3_IMP_RESET:
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

		if (do_reset)
			hclge_do_reset(hdev, hdev->reset_type);
		else
			set_bit(HCLGE_STATE_RESET_INT, &hdev->state);

		if (!hclge_reset_wait(hdev)) {
			hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
			hclge_reset_ae_dev(hdev->ae_dev);
			hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
			clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
		}
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		break;
	default:
		dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
			hdev->reset_type);
		break;
	}
	/* ready for the next reset request/event */
	hdev->reset_type = HNAE3_NONE_RESET;
}

/* Service-task half of the misc interrupt: run any pending reset work,
 * then re-enable vector 0 (masked in hclge_misc_irq_handle()).
 */
static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
{
	hclge_reset_subtask(hdev);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	hclge_misc_irq_service_task(hdev);
	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_stats_for_all(hdev);
@@ -4412,6 +4703,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
	hdev->flag |= HCLGE_FLAG_USE_MSIX;
	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
@@ -4420,7 +4712,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
		goto err_pci_init;
	}

	/* Command queue initialize */
	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		return ret;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;
@@ -4447,6 +4746,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
		return ret;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
@@ -4459,6 +4766,19 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_mdio_config(hdev);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "mdio config fail ret=%d\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -4499,6 +4819,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

@@ -4513,6 +4836,91 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
	return ret;
}

/* Zero the software copy of the hardware statistics; called before a
 * reset re-initialization so stale counters are not carried over.
 */
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

/* Re-initialize the ae_dev after a hardware reset has completed
 * (called from hclge_reset_subtask()).
 *
 * Repeats the hardware-facing part of the probe-time init sequence —
 * command queue, capabilities, configuration, TQP mapping, MAC,
 * buffers, TSO, VLAN, TM scheduler, RSS — in the same order, then
 * re-enables the misc vector. Ordering matters: each step depends on
 * the previous one having programmed the hardware.
 *
 * Returns 0 on success or the first failing step's error code; on
 * failure the device is left partially initialized (no rollback here).
 */
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	/* keep the datapath marked down until clients are re-initialized */
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
@@ -4531,6 +4939,9 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	hclge_free_vector(hdev, 0);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
@@ -4579,6 +4990,7 @@ static const struct hnae3_ae_ops hclge_ops = {
	.get_mdix_mode = hclge_get_mdix_mode,
	.set_vlan_filter = hclge_set_port_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.reset_event = hclge_reset_event,
};

static struct hnae3_ae_algo ae_algo = {
+17 −0
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

#define HCLGE_VECTOR_REG_BASE		0x20000
#define HCLGE_MISC_VECTOR_REG_BASE	0x20400

#define HCLGE_VECTOR_REG_OFFSET		0x4
#define HCLGE_VECTOR_VF_OFFSET		0x100000
@@ -78,6 +79,19 @@
#define HCLGE_PHY_MDIX_STATUS_B	(6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B	(11)

/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG	0x20700
#define HCLGE_GLOBAL_RESET_REG		0x20A00
#define HCLGE_GLOBAL_RESET_BIT		0x0
#define HCLGE_CORE_RESET_BIT		0x1
#define HCLGE_FUN_RST_ING		0x20C00
#define HCLGE_FUN_RST_ING_B		0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B	5
#define HCLGE_VECTOR0_CORERESET_INT_B	6
#define HCLGE_VECTOR0_IMPRESET_INT_B	7

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
@@ -87,6 +101,7 @@ enum HCLGE_DEV_STATE {
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_MBX_IRQ,
	HCLGE_STATE_RESET_INT,
	HCLGE_STATE_MAX
};

@@ -400,9 +415,11 @@ struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;

	enum hnae3_reset_type reset_type;
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
Loading