Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1b02bde3 authored by Emoly Liu's avatar Emoly Liu Committed by Greg Kroah-Hartman
Browse files

staging/lustre/llite: allocate and free client cache asynchronously



Since an inflight request holds a refcount on the import as well as the
export, sometimes obd_disconnect() in client_common_put_super() cannot
put the last refcount of the OSC import (e.g. due to a network
disconnection); this causes cl_cache to be accessed after it is freed.

To fix this issue, ccc_users is used as the cl_cache refcount, and
lov/llite/osc each hold one cl_cache reference, to avoid the race in
which a new OST is added to the system while the client is being
mounted.
The following cl_cache functions are added:
- cl_cache_init(): allocate and initialize cl_cache
- cl_cache_incref(): increase cl_cache refcount
- cl_cache_decref(): decrease cl_cache refcount and free the cache
  when the refcount drops to 0.

Signed-off-by: default avatarEmoly Liu <emoly.liu@intel.com>
Reviewed-on: http://review.whamcloud.com/13746
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6173


Reviewed-by: default avatarNiu Yawei <yawei.niu@intel.com>
Signed-off-by: default avatarOleg Drokin <green@linuxhacker.ru>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 341f1f0a
Loading
Loading
Loading
Loading
+9 −1
Original line number Diff line number Diff line
@@ -2322,7 +2322,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
 */
struct cl_client_cache {
	/**
	 * # of users (OSCs)
	 * # of client cache refcount
	 * # of users (OSCs) + 2 (held by llite and lov)
	 */
	atomic_t		ccc_users;
	/**
@@ -2357,6 +2358,13 @@ struct cl_client_cache {

};

/**
 * cl_cache functions
 */
struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
void cl_cache_incref(struct cl_client_cache *cache);
void cl_cache_decref(struct cl_client_cache *cache);

/** @} cl_page */

/** \defgroup cl_lock cl_lock
+1 −1
Original line number Diff line number Diff line
@@ -415,7 +415,7 @@ struct lov_obd {
	enum lustre_sec_part    lov_sp_me;

	/* Cached LRU and unstable data from upper layer */
	void		       *lov_cache;
	struct cl_client_cache *lov_cache;

	struct rw_semaphore     lov_notify_lock;

+1 −1
Original line number Diff line number Diff line
@@ -452,7 +452,7 @@ struct ll_sb_info {
	 * any page which is sent to a server as part of a bulk request,
	 * but is uncommitted to stable storage.
	 */
	struct cl_client_cache    ll_cache;
	struct cl_client_cache    *ll_cache;

	struct lprocfs_stats     *ll_ra_stats;

+15 −20
Original line number Diff line number Diff line
@@ -83,15 +83,11 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
	pages = si.totalram - si.totalhigh;
	lru_page_max = pages / 2;

	/* initialize ll_cache data */
	atomic_set(&sbi->ll_cache.ccc_users, 0);
	sbi->ll_cache.ccc_lru_max = lru_page_max;
	atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
	spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
	INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);

	atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
	init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
	sbi->ll_cache = cl_cache_init(lru_page_max);
	if (!sbi->ll_cache) {
		kfree(sbi);
		return NULL;
	}

	sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
					   SBI_DEFAULT_READAHEAD_MAX);
@@ -131,6 +127,11 @@ static void ll_free_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	if (sbi->ll_cache) {
		cl_cache_decref(sbi->ll_cache);
		sbi->ll_cache = NULL;
	}

	kfree(sbi);
}

@@ -488,8 +489,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
	cl_sb_init(sb);

	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
				 KEY_CACHE_SET, sizeof(sbi->ll_cache),
				 &sbi->ll_cache, NULL);
				 KEY_CACHE_SET, sizeof(*sbi->ll_cache),
				 sbi->ll_cache, NULL);

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
@@ -534,8 +535,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
out_dt:
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
	/* Make sure all OScs are gone, since cl_cache is accessing sbi. */
	obd_zombie_barrier();
out_md_fid:
	obd_fid_fini(sbi->ll_md_exp->exp_obd);
out_md:
@@ -585,10 +584,6 @@ static void client_common_put_super(struct super_block *sb)
	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
	/* wait till all OSCs are gone, since cl_cache is accessing sbi.
	 * see LU-2543.
	 */
	obd_zombie_barrier();

	ldebugfs_unregister_mountpoint(sbi);

@@ -921,12 +916,12 @@ void ll_put_super(struct super_block *sb)
	if (!force) {
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);

		rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
				  !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
		rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
				  !atomic_read(&sbi->ll_cache->ccc_unstable_nr),
				  &lwi);
	}

	ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
	ccc_count = atomic_read(&sbi->ll_cache->ccc_unstable_nr);
	if (!force && rc != -EINTR)
		LASSERTF(!ccc_count, "count: %i\n", ccc_count);

+3 −3
Original line number Diff line number Diff line
@@ -356,7 +356,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
{
	struct super_block     *sb    = m->private;
	struct ll_sb_info      *sbi   = ll_s2sbi(sb);
	struct cl_client_cache *cache = &sbi->ll_cache;
	struct cl_client_cache *cache = sbi->ll_cache;
	int shift = 20 - PAGE_SHIFT;
	int max_cached_mb;
	int unused_mb;
@@ -383,7 +383,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
{
	struct super_block *sb = ((struct seq_file *)file->private_data)->private;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct cl_client_cache *cache = &sbi->ll_cache;
	struct cl_client_cache *cache = sbi->ll_cache;
	struct lu_env *env;
	int refcheck;
	int mult, rc, pages_number;
@@ -822,7 +822,7 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
{
	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
					      ll_kobj);
	struct cl_client_cache *cache = &sbi->ll_cache;
	struct cl_client_cache *cache = sbi->ll_cache;
	int pages, mb;

	pages = atomic_read(&cache->ccc_unstable_nr);
Loading