Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c56e256d authored by Oleg Drokin, committed by Greg Kroah-Hartman
Browse files

staging/lustre/include: Adjust comment style



This fixes most of the
"Block comments use a trailing */ on a separate line" checkpatch
warnings, along with some slight reformatting of structures and
comments in places.

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a1e616b0
Loading
Loading
Loading
Loading
+45 −24
Original line number Diff line number Diff line
@@ -157,7 +157,8 @@ struct cl_device {
};

/** \addtogroup cl_object cl_object
 * @{ */
 * @{
 */
/**
 * "Data attributes" of cl_object. Data attributes can be updated
 * independently for a sub-object, and top-object's attributes are calculated
@@ -288,13 +289,14 @@ struct cl_object_conf {

enum {
	/** configure layout, set up a new stripe, must be called while
	 * holding layout lock. */
	 * holding layout lock.
	 */
	OBJECT_CONF_SET = 0,
	/** invalidate the current stripe configuration due to losing
	 * layout lock. */
	 * layout lock.
	 */
	OBJECT_CONF_INVALIDATE = 1,
	/** wait for old layout to go away so that new layout can be
	 * set up. */
	/** wait for old layout to go away so that new layout can be set up. */
	OBJECT_CONF_WAIT = 2
};

@@ -393,7 +395,8 @@ struct cl_object_operations {
 */
struct cl_object_header {
	/** Standard lu_object_header. cl_object::co_lu::lo_header points
	 * here. */
	 * here.
	 */
	struct lu_object_header  coh_lu;
	/** \name locks
	 * \todo XXX move locks below to the separate cache-lines, they are
@@ -464,7 +467,8 @@ struct cl_object_header {
#define CL_PAGE_EOF ((pgoff_t)~0ull)

/** \addtogroup cl_page cl_page
 * @{ */
 * @{
 */

/** \struct cl_page
 * Layered client page.
@@ -687,12 +691,14 @@ enum cl_page_state {

enum cl_page_type {
	/** Host page, the page is from the host inode which the cl_page
	 * belongs to. */
	 * belongs to.
	 */
	CPT_CACHEABLE = 1,

	/** Transient page, the transient cl_page is used to bind a cl_page
	 *  to vmpage which is not belonging to the same object of cl_page.
	 *  it is used in DirectIO, lockless IO and liblustre. */
	 *  it is used in DirectIO, lockless IO and liblustre.
	 */
	CPT_TRANSIENT,
};

@@ -728,7 +734,8 @@ struct cl_page {
	/** Parent page, NULL for top-level page. Immutable after creation. */
	struct cl_page	  *cp_parent;
	/** Lower-layer page. NULL for bottommost page. Immutable after
	 * creation. */
	 * creation.
	 */
	struct cl_page	  *cp_child;
	/**
	 * Page state. This field is const to avoid accidental update, it is
@@ -1126,7 +1133,8 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
/** @} cl_page */

/** \addtogroup cl_lock cl_lock
 * @{ */
 * @{
 */
/** \struct cl_lock
 *
 * Extent locking on the client.
@@ -1641,7 +1649,8 @@ struct cl_lock {
struct cl_lock_slice {
	struct cl_lock		  *cls_lock;
	/** Object slice corresponding to this lock slice. Immutable after
	 * creation. */
	 * creation.
	 */
	struct cl_object		*cls_obj;
	const struct cl_lock_operations *cls_ops;
	/** Linkage into cl_lock::cll_layers. Immutable after creation. */
@@ -1885,7 +1894,8 @@ struct cl_2queue {
/** @} cl_page_list */

/** \addtogroup cl_io cl_io
 * @{ */
 * @{
 */
/** \struct cl_io
 * I/O
 *
@@ -2284,7 +2294,8 @@ enum cl_fsync_mode {
	/** discard all of dirty pages in a specific file range */
	CL_FSYNC_DISCARD = 2,
	/** start writeback and make sure they have reached storage before
	 * return. OST_SYNC RPC must be issued and finished */
	 * return. OST_SYNC RPC must be issued and finished
	 */
	CL_FSYNC_ALL   = 3
};

@@ -2403,7 +2414,8 @@ struct cl_io {
/** @} cl_io */

/** \addtogroup cl_req cl_req
 * @{ */
 * @{
 */
/** \struct cl_req
 * Transfer.
 *
@@ -2582,7 +2594,8 @@ enum cache_stats_item {
	/** how many entities are in the cache right now */
	CS_total,
	/** how many entities in the cache are actively used (and cannot be
	 * evicted) right now */
	 * evicted) right now
	 */
	CS_busy,
	/** how many entities were created at all */
	CS_create,
@@ -2725,7 +2738,8 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
/** @} helpers */

/** \defgroup cl_object cl_object
 * @{ */
 * @{
 */
struct cl_object *cl_object_top (struct cl_object *o);
struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
				 const struct lu_fid *fid,
@@ -2770,7 +2784,8 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
/** @} cl_object */

/** \defgroup cl_page cl_page
 * @{ */
 * @{
 */
enum {
	CLP_GANG_OKAY = 0,
	CLP_GANG_RESCHED,
@@ -2888,7 +2903,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
/** @} cl_page */

/** \defgroup cl_lock cl_lock
 * @{ */
 * @{
 */

struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
			     const struct cl_lock_descr *need,
@@ -2966,7 +2982,8 @@ int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
 *
 * cl_use_try()     NONE	 cl_lock_operations::clo_use()     CLS_HELD
 *
 * @{ */
 * @{
 */

int   cl_wait       (const struct lu_env *env, struct cl_lock *lock);
void  cl_unuse      (const struct lu_env *env, struct cl_lock *lock);
@@ -3019,7 +3036,8 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
/** @} cl_lock */

/** \defgroup cl_io cl_io
 * @{ */
 * @{
 */

int   cl_io_init	 (const struct lu_env *env, struct cl_io *io,
			  enum cl_io_type iot, struct cl_object *obj);
@@ -3094,7 +3112,8 @@ do { \
/** @} cl_io */

/** \defgroup cl_page_list cl_page_list
 * @{ */
 * @{
 */

/**
 * Last page in the page list.
@@ -3137,7 +3156,8 @@ void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
/** @} cl_page_list */

/** \defgroup cl_req cl_req
 * @{ */
 * @{
 */
struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
			    enum cl_req_type crt, int nr_objects);

@@ -3214,7 +3234,8 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
 *       - cl_env_reexit(cl_env_reenter had to be called priorly)
 *
 * \see lu_env, lu_context, lu_context_key
 * @{ */
 * @{
 */

struct cl_env_nest {
	int   cen_refcheck;
+2 −1
Original line number Diff line number Diff line
@@ -383,7 +383,8 @@ void cl_put_grouplock(struct ccc_grouplock *cg);
 *
 * NB: If you find you have to use these interfaces for your new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering. */
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);
+8 −5
Original line number Diff line number Diff line
@@ -175,7 +175,8 @@ struct lprocfs_percpu {
enum lprocfs_stats_flags {
	LPROCFS_STATS_FLAG_NONE     = 0x0000, /* per cpu counter */
	LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
					       * area and need locking */
					       * area and need locking
					       */
	LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */
};

@@ -196,7 +197,8 @@ struct lprocfs_stats {
	unsigned short			ls_biggest_alloc_num;
	enum lprocfs_stats_flags	ls_flags;
	/* Lock used when there are no percpu stats areas; For percpu stats,
	 * it is used to protect ls_biggest_alloc_num change */
	 * it is used to protect ls_biggest_alloc_num change
	 */
	spinlock_t			ls_lock;

	/* has ls_num of counter headers */
@@ -611,9 +613,10 @@ int lprocfs_single_release(struct inode *, struct file *);
int lprocfs_seq_release(struct inode *, struct file *);

/* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
  proc entries; otherwise, you will define name##_seq_write function also for
  a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally,
  call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */
 * proc entries; otherwise, you will define name##_seq_write function also for
 * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally,
 * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data);
 */
#define __LPROC_SEQ_FOPS(name, custom_seq_write)			\
static int name##_single_open(struct inode *inode, struct file *file)	\
{									\
+4 −2
Original line number Diff line number Diff line
@@ -166,7 +166,8 @@ struct lu_device_operations {
 */
enum loc_flags {
	/* This is a new object to be allocated, or the file
	 * corresponding to the object does not exists. */
	 * corresponding to the object does not exists.
	 */
	LOC_F_NEW	= 0x00000001,
};

@@ -895,7 +896,8 @@ enum lu_xattr_flags {
/** @} helpers */

/** \name lu_context
 * @{ */
 * @{
 */

/** For lu_context health-checks */
enum lu_context_state {
+39 −24
Original line number Diff line number Diff line
@@ -47,9 +47,11 @@

struct ll_fiemap_extent {
	__u64 fe_logical;  /* logical offset in bytes for the start of
			    * the extent from the beginning of the file */
			    * the extent from the beginning of the file
			    */
	__u64 fe_physical; /* physical offset in bytes for the start
			    * of the extent from the beginning of the disk */
			    * of the extent from the beginning of the disk
			    */
	__u64 fe_length;   /* length in bytes for this extent */
	__u64 fe_reserved64[2];
	__u32 fe_flags;    /* FIEMAP_EXTENT_* flags for this extent */
@@ -59,9 +61,11 @@ struct ll_fiemap_extent {

struct ll_user_fiemap {
	__u64 fm_start;  /* logical offset (inclusive) at
			  * which to start mapping (in) */
			  * which to start mapping (in)
			  */
	__u64 fm_length; /* logical length of mapping which
			  * userspace wants (in) */
			  * userspace wants (in)
			  */
	__u32 fm_flags;  /* FIEMAP_FLAG_* flags for request (in/out) */
	__u32 fm_mapped_extents;/* number of extents that were mapped (out) */
	__u32 fm_extent_count;  /* size of fm_extents array (in) */
@@ -71,28 +75,38 @@ struct ll_user_fiemap {

#define FIEMAP_MAX_OFFSET      (~0ULL)

#define FIEMAP_FLAG_SYNC	 0x00000001 /* sync file data before map */
#define FIEMAP_FLAG_XATTR	0x00000002 /* map extended attribute tree */

#define FIEMAP_FLAG_SYNC		0x00000001 /* sync file data before
						    * map
						    */
#define FIEMAP_FLAG_XATTR		0x00000002 /* map extended attribute
						    * tree
						    */
#define FIEMAP_EXTENT_LAST		0x00000001 /* Last extent in file. */
#define FIEMAP_EXTENT_UNKNOWN		0x00000002 /* Data location unknown. */
#define FIEMAP_EXTENT_DELALLOC		0x00000004 /* Location still pending.
						    * Sets EXTENT_UNKNOWN. */
						    * Sets EXTENT_UNKNOWN.
						    */
#define FIEMAP_EXTENT_ENCODED		0x00000008 /* Data can not be read
						    * while fs is unmounted */
						    * while fs is unmounted
						    */
#define FIEMAP_EXTENT_DATA_ENCRYPTED	0x00000080 /* Data is encrypted by fs.
						    * Sets EXTENT_NO_DIRECT. */
						    * Sets EXTENT_NO_DIRECT.
						    */
#define FIEMAP_EXTENT_NOT_ALIGNED       0x00000100 /* Extent offsets may not be
						    * block aligned. */
						    * block aligned.
						    */
#define FIEMAP_EXTENT_DATA_INLINE       0x00000200 /* Data mixed with metadata.
						    * Sets EXTENT_NOT_ALIGNED.*/
#define FIEMAP_EXTENT_DATA_TAIL		0x00000400 /* Multiple files in block.
						    * Sets EXTENT_NOT_ALIGNED.*/
						    * Sets EXTENT_NOT_ALIGNED.
						    */
#define FIEMAP_EXTENT_UNWRITTEN		0x00000800 /* Space allocated, but
						    * no data (i.e. zero). */
						    * no data (i.e. zero).
						    */
#define FIEMAP_EXTENT_MERGED		0x00001000 /* File does not natively
						    * support extents. Result
						    * merged for efficiency. */
						    * merged for efficiency.
						    */

static inline size_t fiemap_count_to_size(size_t extent_count)
{
@@ -115,6 +129,7 @@ static inline unsigned fiemap_size_to_count(size_t array_size)
/* Lustre specific flags - use a high bit, don't conflict with upstream flag */
#define FIEMAP_EXTENT_NO_DIRECT	 0x40000000 /* Data mapping undefined */
#define FIEMAP_EXTENT_NET	 0x80000000 /* Data stored remotely.
						    * Sets NO_DIRECT flag */
					     * Sets NO_DIRECT flag
					     */

#endif /* _LUSTRE_FIEMAP_H */
Loading