
Commit 13ca970e authored by Wei Hu (Xavier), committed by Doug Ledford

RDMA/hns: Modify assignment device variable to support both PCI device and platform device



In order to support scalability across hardware versions, the
hardware-independent features will be located in hns-roce.ko, and the
hardware-specific operations will be located in hns_roce_hw_v1.ko or
hns_roce_hw_v2.ko, depending on the chip series.
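
To illustrate this split (not part of this commit), the common module
can reach the hardware-specific module through a table of callbacks.
The names below are a simplified sketch, not the driver's actual
struct hns_roce_hw definition:

	struct hns_roce_hw_ops {
		/* provided by hns_roce_hw_v1.ko (hip06) or hns_roce_hw_v2.ko (hip08) */
		int  (*hw_init)(struct hns_roce_dev *hr_dev);
		void (*hw_exit)(struct hns_roce_dev *hr_dev);
		int  (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, u32 in_modifier,
				  u8 op_modifier, u16 op);
	};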

The hip08 RoCE engine is a PCI device, while the hip06 RoCE engine is a
platform device. In order to support both platform devices and PCI
devices, we replace &hr_dev->pdev->dev with hr_dev->dev in hns-roce.ko
as follows:
	Before modification:
		struct device *dev = &hr_dev->pdev->dev;
	After modification:
		struct device *dev = hr_dev->dev;

	The related structure:
	struct hns_roce_dev {
		...
		struct platform_device  *pdev;
		struct pci_dev		*pci_dev;
		struct device		*dev;
		...
	};
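
For context, and also not part of this commit, here is a minimal sketch
of how hr_dev->dev could be assigned once per bus type at probe time,
so that common code only ever dereferences hr_dev->dev; the helper
names are hypothetical:

	#include <linux/pci.h>
	#include <linux/platform_device.h>

	/* hip06: the RoCE engine is probed as a platform device */
	static void hns_roce_bind_platform_dev(struct hns_roce_dev *hr_dev,
					       struct platform_device *pdev)
	{
		hr_dev->pdev = pdev;
		hr_dev->dev  = &pdev->dev;	/* generic device for common code */
	}

	/* hip08: the RoCE engine is probed as a PCI device */
	static void hns_roce_bind_pci_dev(struct hns_roce_dev *hr_dev,
					  struct pci_dev *pci_dev)
	{
		hr_dev->pci_dev = pci_dev;
		hr_dev->dev     = &pci_dev->dev;
	}

Common code such as the hunks below can then call dev_err(hr_dev->dev, ...)
or dma_alloc_coherent(hr_dev->dev, ...) without knowing which bus the
engine sits on.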

Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent dd74282d
drivers/infiniband/hw/hns/hns_roce_ah.c  +1 −1
@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct ib_gid_attr gid_attr;
	struct hns_roce_ah *ah;
	u16 vlan_tag = 0xffff;
drivers/infiniband/hw/hns/hns_roce_alloc.c  +3 −3
@@ -161,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf)
{
	int i;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	u32 bits_per_long = BITS_PER_LONG;

	if (buf->nbufs == 1) {
@@ -172,7 +172,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
-				dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+				dma_free_coherent(dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
@@ -186,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
	int i = 0;
	dma_addr_t t;
	struct page **pages;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	u32 bits_per_long = BITS_PER_LONG;

	/* SQ/RQ buf lease than one page, SQ + RQ = 8K */
drivers/infiniband/hw/hns/hns_roce_cmd.c  +3 −3
@@ -125,7 +125,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				    u8 op_modifier, u16 op,
				    unsigned long timeout)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	u8 __iomem *hcr = hr_dev->cmd.hcr;
	unsigned long end = 0;
	u32 status = 0;
@@ -196,8 +196,8 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				    unsigned long timeout)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cmd_context *context;
+	struct device *dev = hr_dev->dev;
	int ret = 0;

	spin_lock(&cmd->context_lock);
@@ -273,7 +273,7 @@ EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;

	mutex_init(&hr_dev->cmd.hcr_mutex);
	sema_init(&hr_dev->cmd.poll_sem, 1);
drivers/infiniband/hw/hns/hns_roce_cq.c  +6 −6
@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-		dev_err(&hr_dev->pdev->dev,
+		dev_err(hr_dev->dev,
			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
			event_type, hr_cq->cqn);
		return;
@@ -87,7 +87,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
{
	struct hns_roce_cmd_mailbox *mailbox = NULL;
	struct hns_roce_cq_table *cq_table = NULL;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle;
	u64 *mtts = NULL;
	int ret = 0;
@@ -182,7 +182,7 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
@@ -282,7 +282,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_cq ucmd;
	struct hns_roce_cq *hr_cq = NULL;
	struct hns_roce_uar *uar = NULL;
@@ -416,7 +416,7 @@ EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
@@ -432,7 +432,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&cq_table->tree,
drivers/infiniband/hw/hns/hns_roce_hem.c  +6 −6
@@ -84,7 +84,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
		 * memory, directly return fail.
		 */
		mem = &chunk->mem[chunk->npages];
-		buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
+		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
				&sg_dma_address(mem), gfp_mask);
		if (!buf)
			goto fail;
@@ -115,7 +115,7 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)

	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i)
-			dma_free_coherent(&hr_dev->pdev->dev,
+			dma_free_coherent(hr_dev->dev,
				   chunk->mem[i].length,
				   lowmem_page_address(sg_page(&chunk->mem[i])),
				   sg_dma_address(&chunk->mem[i]));
@@ -128,8 +128,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, unsigned long obj)
{
-	struct device *dev = &hr_dev->pdev->dev;
	spinlock_t *lock = &hr_dev->bt_cmd_lock;
+	struct device *dev = hr_dev->dev;
	unsigned long end = 0;
	unsigned long flags;
	struct hns_roce_hem_iter iter;
@@ -212,7 +212,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	int ret = 0;
	unsigned long i;

@@ -251,7 +251,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	unsigned long i;

	i = (obj & (table->num_obj - 1)) /
@@ -380,7 +380,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
{
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
	unsigned long i;

	for (i = 0; i < table->num_hem; ++i)