Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4507025d authored by Krishna Gudipati, committed by James Bottomley
Browse files

[SCSI] bfa: DMA memory allocation enhancement.



- Modified the design such that each BFA sub-module will provide
  the amount of DMA and KVA memory needed by it and queues the
  same request to the global dma and kva info queues.
- During the memory allocation we iterate over this queue to allocate
  the dma and kva memory requested by sub-modules.
- The change is needed to avoid requesting the aggregate amount of memory
  needed by all the BFA sub-modules as one contiguous chunk.

Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent 3fd45980
Loading
Loading
Loading
Loading
+17 −33
Original line number Diff line number Diff line
@@ -172,34 +172,6 @@ struct bfa_pciid_s {

extern char     bfa_version[];

/*
 * BFA memory resources
 */
enum bfa_mem_type {
	BFA_MEM_TYPE_KVA = 1,	/*  Kernel Virtual Memory *(non-dma-able) */
	BFA_MEM_TYPE_DMA = 2,	/*  DMA-able memory */
	BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
};

struct bfa_mem_elem_s {
	enum bfa_mem_type mem_type;	/* see enum bfa_mem_type */
	u32	mem_len;	/*  Total Length in Bytes	*/
	u8		*kva;		/*  kernel virtual address	*/
	u64	dma;		/*  dma address if DMA memory	*/
	u8		*kva_curp;	/*  kva allocation cursor	*/
	u64	dma_curp;	/*  dma allocation cursor	*/
};

struct bfa_meminfo_s {
	struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
};
#define bfa_meminfo_kva(_m)				\
	((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
#define bfa_meminfo_dma_virt(_m)			\
	((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
#define bfa_meminfo_dma_phys(_m)			\
	((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)

struct bfa_iocfc_regs_s {
	void __iomem	*intr_status;
	void __iomem	*intr_mask;
@@ -294,8 +266,19 @@ struct bfa_iocfc_s {
	void			*updateq_cbarg;	/*  bios callback arg */
	u32	intr_mask;
	struct bfa_faa_args_s	faa_args;
	struct bfa_mem_dma_s	ioc_dma;
	struct bfa_mem_dma_s	iocfc_dma;
	struct bfa_mem_dma_s	reqq_dma[BFI_IOC_MAX_CQS];
	struct bfa_mem_dma_s	rspq_dma[BFI_IOC_MAX_CQS];
	struct bfa_mem_kva_s	kva_seg;
};

#define BFA_MEM_IOC_DMA(_bfa)		(&((_bfa)->iocfc.ioc_dma))
#define BFA_MEM_IOCFC_DMA(_bfa)		(&((_bfa)->iocfc.iocfc_dma))
#define BFA_MEM_REQQ_DMA(_bfa, _qno)	(&((_bfa)->iocfc.reqq_dma[(_qno)]))
#define BFA_MEM_RSPQ_DMA(_bfa, _qno)	(&((_bfa)->iocfc.rspq_dma[(_qno)]))
#define BFA_MEM_IOCFC_KVA(_bfa)		(&((_bfa)->iocfc.kva_seg))

#define bfa_fn_lpu(__bfa)	\
	bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc))
#define bfa_msix_init(__bfa, __nvecs)					\
@@ -329,17 +312,17 @@ struct bfa_iocfc_s {
/*
 * FC specific IOC functions.
 */
void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		       u32 *dm_len);
void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg,
			struct bfa_meminfo_s *meminfo,
			struct bfa_s *bfa);
void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
		      struct bfa_iocfc_cfg_s *cfg,
		      struct bfa_meminfo_s *meminfo,
		      struct bfa_pcidev_s *pcidev);
void bfa_iocfc_init(struct bfa_s *bfa);
void bfa_iocfc_start(struct bfa_s *bfa);
void bfa_iocfc_stop(struct bfa_s *bfa);
void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa);
bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
void bfa_iocfc_reset_queues(struct bfa_s *bfa);

@@ -418,7 +401,8 @@ void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
			 struct bfa_meminfo_s *meminfo);
			struct bfa_meminfo_s *meminfo,
			struct bfa_s *bfa);
void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo,
		struct bfa_pcidev_s *pcidev);
+124 −147
Original line number Diff line number Diff line
@@ -89,46 +89,26 @@ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {


static void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;
	struct bfa_mem_dma_s	*port_dma = BFA_MEM_PORT_DMA(bfa);

	dm_len = bfa_port_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(port, 0, sizeof(struct bfa_port_s));
	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, dm_kva, dm_pa);

	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

/*
 * ablk module attach
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;
	struct bfa_mem_dma_s	*ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	dm_len = bfa_ablk_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(ablk, 0, sizeof(struct bfa_ablk_s));
	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, dm_kva, dm_pa);

	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}

/*
@@ -444,41 +424,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 *  BFA IOC private functions
 */

static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int		i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}

static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
}

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
@@ -604,48 +549,42 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8	       *dm_kva;
	u64	dm_pa;
	int		i, per_reqq_sz, per_rspq_sz;
	u8	*dm_kva = NULL;
	u64	dm_pa = 0;
	int	i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	int		dbgsz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);
	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			bfa_mem_dma_phys(ioc_dma));

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa  = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
@@ -658,36 +597,27 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);


	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}

@@ -1102,15 +1032,47 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		  u32 *dm_len)
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	/* dma memory for IOC */
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32	dm_len = 0;

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
			((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}

/*
@@ -1118,7 +1080,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
		 struct bfa_pcidev_s *pcidev)
{
	int		i;
	struct bfa_ioc_s *ioc = &bfa->ioc;
@@ -1135,7 +1097,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
@@ -1259,12 +1221,12 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}
/*
 * Enable IOC after it is disabled.
@@ -1353,34 +1315,37 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	int		i;
	u32	km_len = 0, dm_len = 0;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
		BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
		BFA_MEM_TYPE_DMA;

	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	dm_len += bfa_port_meminfo();
	dm_len += bfa_ablk_meminfo();
	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
}

/*
@@ -1414,28 +1379,40 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	       struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int	i;
	struct bfa_mem_elem_s	*melem;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	bfa_com_port_attach(bfa, meminfo);
	bfa_com_ablk_attach(bfa, meminfo);
	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
}

/*
+75 −44
Original line number Diff line number Diff line
@@ -286,10 +286,9 @@ static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	bfa_itnim_meminfo(cfg, km_len, dm_len);
	bfa_itnim_meminfo(cfg, km_len);

	/*
	 * IO memory
@@ -308,8 +307,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,

static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
		struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_pcidev_s *pcidev)
		struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_s *bfa = fcp->bfa;
@@ -328,9 +326,9 @@ bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim, meminfo);
	bfa_tskim_attach(fcpim, meminfo);
	bfa_ioim_attach(fcpim, meminfo);
	bfa_itnim_attach(fcpim);
	bfa_tskim_attach(fcpim);
	bfa_ioim_attach(fcpim);
}

static void
@@ -972,8 +970,7 @@ bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	/*
	 * ITN memory
@@ -982,15 +979,16 @@ bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_s	*bfa = fcpim->bfa;
	struct bfa_fcp_mod_s	*fcp = fcpim->fcp;
	struct bfa_itnim_s *itnim;
	int	i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
@@ -1012,7 +1010,7 @@ bfa_itnim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_meminfo_kva(minfo) = (u8 *) itnim;
	bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
@@ -2345,22 +2343,23 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_fcp_mod_s	*fcp = fcpim->fcp;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
	bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
	bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);

	/*
	 * Initialize ioim free queues
@@ -3109,15 +3108,16 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_fcp_mod_s	*fcp = fcpim->fcp;
	u16	i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);
	INIT_LIST_HEAD(&fcpim->tskim_unused_q);

	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
@@ -3136,7 +3136,7 @@ bfa_tskim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_meminfo_kva(minfo) = (u8 *) tskim;
	bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}

void
@@ -3233,9 +3233,14 @@ bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
BFA_MODULE(fcp);

static void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len)
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	u16	num_io_req;
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_ios, num_io_req;
	u32	km_len = 0;

	/*
	 * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value.
@@ -3261,43 +3266,69 @@ bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len)
			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
	}

	bfa_fcpim_meminfo(cfg, km_len, dm_len);
	bfa_fcpim_meminfo(cfg, &km_len);

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	*km_len += num_io_req * sizeof(struct bfa_iotag_s);
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
	*dm_len += num_io_req * BFI_IOIM_SNSLEN;
	km_len += num_io_req * sizeof(struct bfa_iotag_s);
	km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
	per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (num_io_req >= per_seg_ios) {
			num_io_req -= per_seg_ios;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_ios * BFI_IOIM_SNSLEN);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_io_req * BFI_IOIM_SNSLEN);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}

static void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	u32	snsbufsz;
	struct bfa_mem_dma_s *seg_ptr;
	u16	idx, nsegs, num_io_req;

	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
	fcp->num_itns   = cfg->fwcfg.num_rports;
	fcp->bfa = bfa;

	snsbufsz = (fcp->num_ioim_reqs + fcp->num_fwtio_reqs) * BFI_IOIM_SNSLEN;
	fcp->snsbase.pa = bfa_meminfo_dma_phys(meminfo);
	bfa_meminfo_dma_phys(meminfo) += snsbufsz;
	/*
	 * Setup the pool of snsbase addr's, that is passed to fw as
	 * part of bfi_iocfc_cfg_s.
	 */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {

	fcp->snsbase.kva = bfa_meminfo_dma_virt(meminfo);
	bfa_meminfo_dma_virt(meminfo) += snsbufsz;
	bfa_iocfc_set_snsbase(bfa, fcp->snsbase.pa);
		if (!bfa_mem_dma_virt(seg_ptr))
			break;

	bfa_fcpim_attach(fcp, bfad, cfg, meminfo, pcidev);
		fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
		fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
	}

	fcp->itn_arr = (struct bfa_itn_s *) bfa_meminfo_kva(meminfo);
	bfa_meminfo_kva(meminfo) = (u8 *)fcp->itn_arr +
	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);

	bfa_iotag_attach(fcp);

	fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
	bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
			(fcp->num_itns * sizeof(struct bfa_itn_s));
	memset(fcp->itn_arr, 0,
			(fcp->num_itns * sizeof(struct bfa_itn_s)));

	bfa_iotag_attach(fcp, meminfo);
}

static void
@@ -3370,12 +3401,12 @@ bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
}

void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp, struct bfa_meminfo_s *minfo)
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_iotag_s *iotag;
	u16	num_io_req, i;

	iotag = (struct bfa_iotag_s *) bfa_meminfo_kva(minfo);
	iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
	fcp->iotag_arr = iotag;

	INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
@@ -3392,5 +3423,5 @@ bfa_iotag_attach(struct bfa_fcp_mod_s *fcp, struct bfa_meminfo_s *minfo)
			list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
	}

	bfa_meminfo_kva(minfo) = (u8 *) iotag;
	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}
+15 −13
Original line number Diff line number Diff line
@@ -25,8 +25,8 @@
#include "bfa_cs.h"

/* FCP module related definitions */
#define BFA_IO_MAX	2000
#define BFA_FWTIO_MAX	0
#define BFA_IO_MAX	BFI_IO_MAX
#define BFA_FWTIO_MAX	2000

struct bfa_fcp_mod_s;
struct bfa_iotag_s {
@@ -41,16 +41,17 @@ struct bfa_itn_s {
void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp, struct bfa_meminfo_s *minfo);
void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);

#define BFA_FCP_MOD(_hal)	(&(_hal)->modules.fcp_mod)
#define BFA_MEM_FCP_KVA(__bfa)	(&(BFA_FCP_MOD(__bfa)->kva_seg))
#define BFA_IOTAG_FROM_TAG(_fcp, _tag)	\
	(&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)])
#define BFA_ITN_FROM_TAG(_fcp, _tag)	\
	((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
	(((u8 *)(_fcp)->snsbase.kva) + (_tag * BFI_IOIM_SNSLEN))
	bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN)

#define BFA_ITNIM_MIN   32
#define BFA_ITNIM_MAX   1024
@@ -130,6 +131,9 @@ struct bfa_fcpim_s {
	bfa_fcpim_profile_t     profile_start;
};

/* Max FCP dma segs required */
#define BFA_FCP_DMA_SEGS	BFI_IOIM_SNSBUF_SEGS

struct bfa_fcp_mod_s {
	struct bfa_s		*bfa;
	struct list_head	iotag_ioim_free_q;	/* free IO resources */
@@ -140,8 +144,10 @@ struct bfa_fcp_mod_s {
	int			num_ioim_reqs;
	int			num_fwtio_reqs;
	int			num_itns;
	struct bfa_dma_s	snsbase;
	struct bfa_dma_s	snsbase[BFA_FCP_DMA_SEGS];
	struct bfa_fcpim_s	fcpim;
	struct bfa_mem_dma_s	dma_seg[BFA_FCP_DMA_SEGS];
	struct bfa_mem_kva_s	kva_seg;
};

/*
@@ -256,8 +262,7 @@ bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
/*
 * function prototypes
 */
void	bfa_ioim_attach(struct bfa_fcpim_s *fcpim,
					struct bfa_meminfo_s *minfo);
void	bfa_ioim_attach(struct bfa_fcpim_s *fcpim);
void	bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void	bfa_ioim_good_comp_isr(struct bfa_s *bfa,
					struct bfi_msg_s *msg);
@@ -267,18 +272,15 @@ void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
void	bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
void	bfa_ioim_tov(struct bfa_ioim_s *ioim);

void	bfa_tskim_attach(struct bfa_fcpim_s *fcpim,
					struct bfa_meminfo_s *minfo);
void	bfa_tskim_attach(struct bfa_fcpim_s *fcpim);
void	bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void	bfa_tskim_iodone(struct bfa_tskim_s *tskim);
void	bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
void	bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
void	bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw);

void	bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
					u32 *dm_len);
void	bfa_itnim_attach(struct bfa_fcpim_s *fcpim,
					struct bfa_meminfo_s *minfo);
void	bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len);
void	bfa_itnim_attach(struct bfa_fcpim_s *fcpim);
void	bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
void	bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void	bfa_itnim_iodone(struct bfa_itnim_s *itnim);
+64 −0

File changed.

Preview size limit exceeded, changes collapsed.

Loading