
Commit 96c53363 authored by Jinshan Xiong, committed by Greg Kroah-Hartman

staging: lustre: clio: Reduce memory overhead of per-page allocation



A page in clio used to occupy 584 bytes, which meant it was allocated
from the size-1024 slab cache. This patch reduces the per-page overhead
to 512 bytes so the size-512 cache can be used instead.
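The saving comes from how generic kmalloc caches are organized: they come
in power-of-two sizes (modern kernels also keep a few intermediates such as
size-96 and size-192), and an allocation is served from the smallest cache
that fits. The standalone userspace sketch below is illustrative only,
assuming pure power-of-two classes; it shows why trimming 584 bytes to 512
removes 440 bytes of per-page waste.

#include <stdio.h>

/* Round a request up to its power-of-two size class (8-byte minimum). */
static size_t size_class(size_t size)
{
	size_t class = 8;

	while (class < size)
		class <<= 1;
	return class;
}

int main(void)
{
	size_t before = 584, after = 512;

	printf("%zu bytes -> size-%zu cache, %zu bytes wasted per page\n",
	       before, size_class(before), size_class(before) - before);
	printf("%zu bytes -> size-%zu cache, %zu bytes wasted per page\n",
	       after, size_class(after), size_class(after) - after);
	return 0;
}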

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-4793
Reviewed-on: http://review.whamcloud.com/10070


Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2e1b5b8b
+9 −28
@@ -689,17 +689,6 @@ enum cl_page_type {
 	CPT_TRANSIENT,
 };
 
-/**
- * Flags maintained for every cl_page.
- */
-enum cl_page_flags {
-	/**
-	 * Set when pagein completes. Used for debugging (read completes at
-	 * most once for a page).
-	 */
-	CPF_READ_COMPLETED = 1 << 0
-};
-
 /**
  * Fields are protected by the lock on struct page, except for atomics and
  * immutables.
@@ -712,26 +701,23 @@ enum cl_page_flags {
 struct cl_page {
 	/** Reference counter. */
 	atomic_t	     cp_ref;
+	/** Transfer error. */
+	int			 cp_error;
 	/** An object this page is a part of. Immutable after creation. */
 	struct cl_object	*cp_obj;
-	/** List of slices. Immutable after creation. */
-	struct list_head	       cp_layers;
 	/** vmpage */
 	struct page		*cp_vmpage;
+	/** Linkage of pages within group. Pages must be owned */
+	struct list_head	 cp_batch;
+	/** List of slices. Immutable after creation. */
+	struct list_head	 cp_layers;
+	/** Linkage of pages within cl_req. */
+	struct list_head         cp_flight;
 	/**
 	 * Page state. This field is const to avoid accidental update, it is
 	 * modified only internally within cl_page.c. Protected by a VM lock.
 	 */
 	const enum cl_page_state cp_state;
-	/** Linkage of pages within group. Protected by cl_page::cp_mutex. */
-	struct list_head		cp_batch;
-	/** Mutex serializing membership of a page in a batch. */
-	struct mutex		cp_mutex;
-	/** Linkage of pages within cl_req. */
-	struct list_head	       cp_flight;
-	/** Transfer error. */
-	int		      cp_error;
-
 	/**
 	 * Page type. Only CPT_TRANSIENT is used so far. Immutable after
 	 * creation.
@@ -743,10 +729,6 @@ struct cl_page {
 	 * by sub-io. Protected by a VM lock.
 	 */
 	struct cl_io	    *cp_owner;
-	/**
-	 * Debug information, the task is owning the page.
-	 */
-	struct task_struct	*cp_task;
 	/**
 	 * Owning IO request in cl_page_state::CPS_PAGEOUT and
 	 * cl_page_state::CPS_PAGEIN states. This field is maintained only in
@@ -759,8 +741,6 @@
 	struct lu_ref_link       cp_obj_ref;
 	/** Link to a queue, for debugging. */
 	struct lu_ref_link       cp_queue_ref;
-	/** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
-	unsigned                 cp_flags;
 	/** Assigned if doing a sync_io */
 	struct cl_sync_io       *cp_sync_io;
 };
@@ -2200,6 +2180,7 @@ static inline void cl_object_page_init(struct cl_object *clob, int size)
 {
 	clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
 	cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
+	WARN_ON(cl_object_header(clob)->coh_page_bufsize > 512);
 }
 
 static inline void *cl_object_page_slice(struct cl_object *clob,
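The new WARN_ON() makes the 512-byte budget explicit: judging from
cl_object_page_init() above, coh_page_bufsize accumulates the rounded size
of every layer's slice, and each layer later finds its slice at a fixed
offset in one per-page buffer. A minimal standalone sketch of that scheme
(names and sizes are illustrative, not the Lustre API):

#include <stdio.h>
#include <stdlib.h>

#define SIZE_ROUND(s)	(((s) + 7UL) & ~7UL)	/* stand-in for cfs_size_round() */

static size_t page_bufsize;	/* plays the role of coh_page_bufsize */

/* Reserve space for one layer's slice; returns the layer's fixed offset. */
static size_t slice_reserve(size_t size)
{
	size_t off = page_bufsize;

	page_bufsize += SIZE_ROUND(size);
	return off;
}

int main(void)
{
	size_t top = slice_reserve(200);	/* sizes are made up */
	size_t sub = slice_reserve(120);
	char *buf = calloc(1, page_bufsize);	/* one allocation serves all layers */

	printf("slices at offsets %zu and %zu, %zu bytes total\n",
	       top, sub, page_bufsize);
	free(buf);
	return 0;
}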
+3 −3
@@ -247,9 +247,9 @@ struct vvp_object {
  */
 struct vvp_page {
 	struct cl_page_slice vpg_cl;
-	int		  vpg_defer_uptodate;
-	int		  vpg_ra_used;
-	int		  vpg_write_queued;
+	unsigned int	vpg_defer_uptodate:1,
+			vpg_ra_used:1,
+			vpg_write_queued:1;
 	/**
 	 * Non-empty iff this page is already counted in
 	 * vvp_object::vob_pending_list. This list is only used as a flag,
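For the vvp_page change above, a self-contained comparison (hypothetical
struct names mirroring the three flags) shows what the bitfield conversion
buys: three full ints collapse into one unsigned int.

#include <stdio.h>

/* Old layout: a full int per boolean flag. */
struct flags_old {
	int defer_uptodate;
	int ra_used;
	int write_queued;
};

/* New layout: three 1-bit fields packed into one unsigned int. */
struct flags_new {
	unsigned int defer_uptodate:1,
		     ra_used:1,
		     write_queued:1;
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct flags_old), sizeof(struct flags_new));	/* 12 vs 4 */
	return 0;
}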
+2 −2
@@ -290,7 +290,7 @@ struct lov_lock {
 
 struct lov_page {
 	struct cl_page_slice	lps_cl;
-	int		  lps_invalid;
+	unsigned int		lps_stripe; /* stripe index */
 };
 
 /*
+2 −4
@@ -244,14 +244,12 @@ void lov_sub_put(struct lov_io_sub *sub)
 
 int lov_page_stripe(const struct cl_page *page)
 {
-	struct lovsub_object *subobj;
 	const struct cl_page_slice *slice;
 
-	slice = cl_page_at(page, &lovsub_device_type);
+	slice = cl_page_at(page, &lov_device_type);
 	LASSERT(slice->cpl_obj);
 
-	subobj = cl2lovsub(slice->cpl_obj);
-	return subobj->lso_index;
+	return cl2lov_page(slice)->lps_stripe;
 }
 
 struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
+1 −0
@@ -129,6 +129,7 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
 	rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
 	LASSERT(rc == 0);
 
+	lpg->lps_stripe = stripe;
 	cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops);
 
 	sub = lov_sub_get(env, lio, stripe);