Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4d114fdd authored by Jubin John, committed by Doug Ledford
Browse files

staging/rdma/hfi1: Fix block comments



Fix block comments with proper formatting to fix checkpatch warnings:
WARNING: Block comments use * on subsequent lines
WARNING: Block comments use a trailing */ on a separate line

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Jubin John <jubin.john@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 6a14c5ea
Loading
Loading
Loading
Loading
+58 −29
Original line number Diff line number Diff line
@@ -6392,14 +6392,18 @@ static void dc_shutdown(struct hfi1_devdata *dd)
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
	/* Shutdown the LCB */
	lcb_shutdown(dd, 1);
	/* Going to OFFLINE would have causes the 8051 to put the
	/*
	 * Going to OFFLINE would have causes the 8051 to put the
	 * SerDes into reset already. Just need to shut down the 8051,
	 * itself. */
	 * itself.
	 */
	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}

/* Calling this after the DC has been brought out of reset should not
 * do any damage. */
/*
 * Calling this after the DC has been brought out of reset should not
 * do any damage.
 */
static void dc_start(struct hfi1_devdata *dd)
{
	unsigned long flags;
@@ -6525,8 +6529,10 @@ void handle_sma_message(struct work_struct *work)
	u64 msg;
	int ret;

	/* msg is bytes 1-4 of the 40-bit idle message - the command code
	   is stripped off */
	/*
	 * msg is bytes 1-4 of the 40-bit idle message - the command code
	 * is stripped off
	 */
	ret = read_idle_sma(dd, &msg);
	if (ret)
		return;
@@ -6815,8 +6821,10 @@ void handle_link_up(struct work_struct *work)
	}
}

/* Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down */
/*
 * Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down
 */
static void reset_neighbor_info(struct hfi1_pportdata *ppd)
{
	ppd->neighbor_guid = 0;
@@ -6862,8 +6870,10 @@ void handle_link_down(struct work_struct *work)
	/* disable the port */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/* If there is no cable attached, turn the DC off. Otherwise,
	 * start the link bring up. */
	/*
	 * If there is no cable attached, turn the DC off. Otherwise,
	 * start the link bring up.
	 */
	if (!qsfp_mod_present(ppd)) {
		dc_shutdown(ppd->dd);
	} else {
@@ -7564,8 +7574,10 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
	}

	if (queue_link_down) {
		/* if the link is already going down or disabled, do not
		 * queue another */
		/*
		 * if the link is already going down or disabled, do not
		 * queue another
		 */
		if ((ppd->host_link_state &
		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
		    ppd->link_enabled == 0) {
@@ -7712,8 +7724,10 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
			/* set status bit */
			dd->err_info_rcvport.status_and_code |=
				OPA_EI_STATUS_SMASK;
			/* save first 2 flits in the packet that caused
			 * the error */
			/*
			 * save first 2 flits in the packet that caused
			 * the error
			 */
			 dd->err_info_rcvport.packet_flit1 = hdr0;
			 dd->err_info_rcvport.packet_flit2 = hdr1;
		}
@@ -7913,8 +7927,10 @@ static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
}

static const struct is_table is_table[] = {
/* start		     end
				name func		interrupt func */
/*
 * start		 end
 *				name func		interrupt func
 */
{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
				is_misc_err_name,	is_misc_err_int },
{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
@@ -10763,8 +10779,10 @@ int set_buffer_control(struct hfi1_pportdata *ppd,
	 */
	memset(changing, 0, sizeof(changing));
	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
	/* NOTE: Assumes that the individual VL bits are adjacent and in
	   increasing order */
	/*
	 * NOTE: Assumes that the individual VL bits are adjacent and in
	 * increasing order
	 */
	stat_mask =
		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
	changing_mask = 0;
@@ -11129,8 +11147,10 @@ static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
	}

	rcd->rcvavail_timeout = timeout;
	/* timeout cannot be larger than rcv_intr_timeout_csr which has already
	   been verified to be in range */
	/*
	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
	 * been verified to be in range
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
		(u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}
@@ -11323,8 +11343,10 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
		/* In one-packet-per-eager mode, the size comes from
		   the RcvArray entry. */
		/*
		 * In one-packet-per-eager mode, the size comes from
		 * the RcvArray entry.
		 */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	}
@@ -12524,7 +12546,8 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
			me->type = IRQ_RCVCTXT;
		} else {
			/* not in our expected range - complain, then
			   ignore it */
			 * ignore it
			 */
			dd_dev_err(dd,
				"Unexpected extra MSI-X interrupt %d\n", i);
			continue;
@@ -12830,8 +12853,10 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)

	/* PIO Send buffers */
	/* SDMA Send buffers */
	/* These are not normally read, and (presently) have no method
	   to be read, so are not pre-initialized */
	/*
	 * These are not normally read, and (presently) have no method
	 * to be read, so are not pre-initialized
	 */

	/* RcvHdrAddr */
	/* RcvHdrTailAddr */
@@ -13026,8 +13051,10 @@ static void reset_misc_csrs(struct hfi1_devdata *dd)
		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
	}
	/* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	   only be written 128-byte chunks */
	/*
	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	 * only be written 128-byte chunks
	 */
	/* init RSA engine to clear lingering errors */
	write_csr(dd, MISC_CFG_RSA_CMD, 1);
	write_csr(dd, MISC_CFG_RSA_MU, 0);
@@ -14045,8 +14072,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/* obtain the hardware ID - NOT related to unit, which is a
	   software enumeration */
	/*
	 * obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
+8 −4
Original line number Diff line number Diff line
@@ -79,8 +79,10 @@
#define PIO_CMASK 0x7ff	/* counter mask for free and fill counters */
#define MAX_EAGER_ENTRIES    2048	/* max receive eager entries */
#define MAX_TID_PAIR_ENTRIES 1024	/* max receive expected pairs */
/* Virtual? Allocation Unit, defined as AU = 8*2^vAU, 64 bytes, AU is fixed
   at 64 bytes for all generation one devices */
/*
 * Virtual? Allocation Unit, defined as AU = 8*2^vAU, 64 bytes, AU is fixed
 * at 64 bytes for all generation one devices
 */
#define CM_VAU 3
/* HFI link credit count, AKA receive buffer depth (RBUF_DEPTH) */
#define CM_GLOBAL_CREDITS 0x940
@@ -518,8 +520,10 @@ enum {
#define LCB_CRC_48B			0x2	/* 48b CRC */
#define LCB_CRC_12B_16B_PER_LANE	0x3	/* 12b-16b per lane CRC */

/* the following enum is (almost) a copy/paste of the definition
 * in the OPA spec, section 20.2.2.6.8 (PortInfo) */
/*
 * the following enum is (almost) a copy/paste of the definition
 * in the OPA spec, section 20.2.2.6.8 (PortInfo)
 */
enum {
	PORT_LTP_CRC_MODE_NONE = 0,
	PORT_LTP_CRC_MODE_14 = 1, /* 14-bit LTP CRC mode (optional) */
+4 −2
Original line number Diff line number Diff line
@@ -388,8 +388,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
				break;
			}
			if (dd->flags & HFI1_FORCED_FREEZE) {
				/* Don't allow context reset if we are into
				 * forced freeze */
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				ret = -ENODEV;
				break;
			}
+12 −6
Original line number Diff line number Diff line
@@ -1294,8 +1294,10 @@ static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
	/* step 3: enable XDMEM access */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
	/* step 4: load firmware into SBus Master XDMEM */
	/* NOTE: the dmem address, write_en, and wdata are all pre-packed,
	   we only need to pick up the bytes and write them */
	/*
	 * NOTE: the dmem address, write_en, and wdata are all pre-packed,
	 * we only need to pick up the bytes and write them
	 */
	for (i = 0; i < fdet->firmware_len; i += 4) {
		sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
					*(u32 *)&fdet->firmware_ptr[i]);
@@ -1305,8 +1307,10 @@ static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
	/* step 6: allow SBus Spico to run */
	sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);

	/* steps 7-11: run RSA, if it succeeds, firmware is available to
	   be swapped */
	/*
	 * steps 7-11: run RSA, if it succeeds, firmware is available to
	 * be swapped
	 */
	return run_rsa(dd, "PCIe serdes", fdet->signature);
}

@@ -1744,8 +1748,10 @@ int get_platform_config_field(struct hfi1_devdata *dd,

			src_ptr = (u32 *)((u8 *)src_ptr + seek);

			/* We expect the field to be byte aligned and whole byte
			 * lengths if we are here */
			/*
			 * We expect the field to be byte aligned and whole byte
			 * lengths if we are here
			 */
			memcpy(data, src_ptr, wlen);
			return 0;
		}
+4 −2
Original line number Diff line number Diff line
@@ -718,8 +718,10 @@ struct hfi1_pportdata {
	/* CA's max number of 64 entry units in the congestion control table */
	u8 cc_max_table_entries;

	/* begin congestion log related entries
	 * cc_log_lock protects all congestion log related data */
	/*
	 * begin congestion log related entries
	 * cc_log_lock protects all congestion log related data
	 */
	spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
	u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
	u16 threshold_event_counter;
Loading