Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 60138066 authored by Krishna Gudipati, committed by James Bottomley
Browse files

[SCSI] bfa: Extend BSG interface.



- Added support to collect driver/fw stats.
- Added support to perform adapter/ioc enable, disable operations.

Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent d7be54cc
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -249,6 +249,7 @@ struct bfa_iocfc_s {
	struct bfa_cb_qe_s	init_hcb_qe;
	struct bfa_cb_qe_s	stop_hcb_qe;
	struct bfa_cb_qe_s	dis_hcb_qe;
	struct bfa_cb_qe_s	en_hcb_qe;
	struct bfa_cb_qe_s	stats_hcb_qe;
	bfa_boolean_t		cfgdone;

+20 −1
Original line number Diff line number Diff line
@@ -125,6 +125,7 @@ enum {
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

#define DEF_CFG_NUM_FABRICS		1
@@ -677,6 +678,16 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

/*
 * Deferred callback run after an IOC enable completes: if a completion
 * context was requested, wake the driver-layer waiter blocked on
 * bfad->enable_comp.
 */
static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa_inst = bfa_arg;
	struct bfad_s	*drv = bfa_inst->bfad;

	if (!compl)
		return;

	complete(&drv->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
@@ -760,9 +771,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
	else {
		if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
					bfa_iocfc_enable_cb, bfa);
		bfa_iocfc_start_submod(bfa);
	}
}
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
@@ -970,6 +985,9 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
					bfa_iocfc_enable_cb, bfa);
		return;
	}

@@ -1236,6 +1254,7 @@ bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
	bfa_ioc_enable(&bfa->ioc);
}

+5 −3
Original line number Diff line number Diff line
@@ -93,9 +93,11 @@ struct bfa_lport_cfg_s {
	wwn_t	       pwwn;       /*  port wwn */
	wwn_t	       nwwn;       /*  node wwn */
	struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
	bfa_boolean_t       preboot_vp;  /*  vport created from PBC */
	enum bfa_lport_role roles;      /* FCS port roles */
	u32     rsvd;
	bfa_boolean_t   preboot_vp;  /*  vport created from PBC */
	u8	tag[16];        /* opaque tag from application */
	u8	padding[4];
};

/*
+54 −1
Original line number Diff line number Diff line
@@ -65,8 +65,12 @@ struct bfa_iocfc_drvcfg_s {
	u16	    ioc_recover;	/*  IOC recovery mode		  */
	u16	    min_cfg;	/*  minimum configuration	  */
	u16        path_tov;	/*  device path timeout	  */
	u16		num_tio_reqs;   /*!< number of TM IO reqs	*/
	u8		port_mode;
	u8		rsvd_a;
	bfa_boolean_t   delay_comp; /*  delay completion of
							failed inflight IOs */
	u16		num_ttsk_reqs;	 /* TM task management requests */
	u32		rsvd;
};

@@ -81,7 +85,7 @@ struct bfa_iocfc_cfg_s {
/*
 * IOC firmware IO stats
 */
struct bfa_fw_io_stats_s {
struct bfa_fw_ioim_stats_s {
	u32	host_abort;		/*  IO aborted by host driver*/
	u32	host_cleanup;		/*  IO clean up by host driver */

@@ -151,6 +155,54 @@ struct bfa_fw_io_stats_s {
						 */
};

/*
 * IOC firmware target-mode IO (TIO) statistics.  Counters are filled in
 * by firmware; the driver copies them out via the stats BSG interface.
 * NOTE(review): layout is shared with firmware — field order and sizes
 * must not change.
 */
struct bfa_fw_tio_stats_s {
	u32	tio_conf_proc;	/* TIO CONF processed */
	u32	tio_conf_drop;      /* TIO CONF dropped */
	u32	tio_cleanup_req;    /* TIO cleanup requested */
	u32	tio_cleanup_comp;   /* TIO cleanup completed */
	u32	tio_abort_rsp;      /* TIO abort response */
	u32	tio_abort_rsp_comp; /* TIO abort rsp completed */
	u32	tio_abts_req;       /* TIO ABTS requested */
	u32	tio_abts_ack;       /* TIO ABTS ack-ed */
	u32	tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
	u32	tio_abts_tmo;       /* TIO ABTS timeout */
	u32	tio_snsdata_dma;    /* TIO sense data DMA */
	u32	tio_rxwchan_wait; /* TIO waiting for RX wait channel */
	u32	tio_rxwchan_avail; /* TIO RX wait channel available */
	u32	tio_hit_bls;        /* TIO IOH BLS event */
	u32	tio_uf_recv;        /* TIO received UF */
	u32	tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
	u32	tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */

	/* Data-sequence (DS) counters for target-mode data transfer */
	u32	ds_rxwchan_wait; /* DS waiting for RX wait channel */
	u32	ds_rxwchan_avail; /* DS RX wait channel available */
	u32	ds_unaligned_rd;    /* DS unaligned read */
	u32	ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
	u32	ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
	u32	ds_flush_req;       /* DS flush requested */
	u32	ds_flush_comp;      /* DS flush completed */
	u32	ds_xfrdy_exp;       /* DS XFER_RDY expired */
	u32	ds_seq_cnt_err;     /* DS seq cnt error */
	u32	ds_seq_len_err;     /* DS seq len error */
	u32	ds_data_oor;        /* DS data out of order */
	u32	ds_hit_bls;     /* DS hit BLS */
	u32	ds_edtov_timer_exp; /* DS edtov expired */
	u32	ds_cpu_owned;       /* DS cpu owned */
	u32	ds_hit_class2;      /* DS hit class2 */
	u32	ds_length_err;      /* DS length error */
	u32	ds_ro_ooo_err;      /* DS relative offset out-of-order error */
	u32	ds_rectov_timer_exp;    /* DS rectov expired */
	u32	ds_unexp_fr_err;    /* DS unexp frame error */
};

/*
 * IOC firmware IO stats: combined initiator-mode (IOIM) and
 * target-mode (TIO) counters, fetched as one unit over the BSG
 * stats interface.
 */
struct bfa_fw_io_stats_s {
	struct bfa_fw_ioim_stats_s	ioim_stats;
	struct bfa_fw_tio_stats_s	tio_stats;
};

/*
 * IOC port firmware stats
 */
@@ -204,6 +256,7 @@ struct bfa_fw_port_lksm_stats_s {
    u32    nos_tx;             /*  No. of times NOS tx started         */
    u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
    u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM      */
	u32	bbsc_lr;	/* LKSM LR tx for credit recovery	*/
};

struct bfa_fw_port_snsm_stats_s {
+72 −0
Original line number Diff line number Diff line
@@ -365,6 +365,78 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa)
	return fcpim->path_tov / 1000;
}

/*
 * Accumulate one named counter from __r (source) into __l (destination).
 * Both pointer arguments are parenthesized in the expansion so the macro
 * stays well-formed for any pointer-valued expression, not just a bare
 * identifier.
 */
#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
	((__l)->__stats += (__r)->__stats)

/*
 * bfa_fcpim_add_stats - add every counter of @rstats into @lstats.
 *
 * Used to aggregate per-itnim IO statistics into a single total
 * (e.g. per logical port) for the BSG stats interface.  @rstats is
 * read-only; @lstats is updated in place.
 */
void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

/*
 * bfa_fcpim_port_iostats - aggregate IO stats for one logical port.
 *
 * Zeroes @stats, then walks the fcpim itnim queue and sums the counters
 * of every itnim whose rport carries @lp_tag.  Always returns
 * BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
		struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_itnim_s *itn;
	struct list_head *cur, *nxt;

	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));

	/* walk all itnims, accumulating only those on the requested port */
	list_for_each_safe(cur, nxt, &fcpim->itnim_q) {
		itn = (struct bfa_itnim_s *) cur;
		if (itn->rport->rport_info.lp_tag == lp_tag)
			bfa_fcpim_add_stats(stats, &(itn->stats));
	}

	return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
Loading