Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 11d66e89 authored by Masanari Iida, committed by Greg Kroah-Hartman
Browse files

staging: lustre: Fix typo in lustre/lustre/osc



Correct spelling typo in lustre/lustre/osc

Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b64767de
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -1703,7 +1703,7 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
	return is_ready;
}

/* this is trying to propogate async writeback errors back up to the
/* this is trying to propagate async writeback errors back up to the
 * application.  As an async write fails we record the error code for later if
 * the app does an fsync.  As long as errors persist we force future rpcs to be
 * sync so that the app can get a sync error and break the cycle of queueing
@@ -2006,7 +2006,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli)
	/* then if we have cache waiters, return all objects with queued
	 * writes.  This is especially important when many small files
	 * have filled up the cache and not been fired into rpcs because
	 * they don't pass the nr_pending/object threshhold */
	 * they don't pass the nr_pending/object threshold */
	if (!list_empty(&cli->cl_cache_waiters) &&
	    !list_empty(&cli->cl_loi_write_list))
		return list_to_obj(&cli->cl_loi_write_list, write_item);
@@ -2226,7 +2226,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
	/* Add this page into extent by the following steps:
	 * 1. if there exists an active extent for this IO, mostly this page
	 *    can be added to the active extent and sometimes we need to
	 *    expand extent to accomodate this page;
	 *    expand extent to accommodate this page;
	 * 2. otherwise, a new extent will be allocated. */

	ext = oio->oi_active;
+1 −1
Original line number Diff line number Diff line
@@ -299,7 +299,7 @@ struct osc_lock {
				 ols_flush:1,
	/**
	 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
	 * the EVAVAIL error as torerable, this will make upper logic happy
	 * the EVAVAIL error as tolerable, this will make upper logic happy
	 * to wait all glimpse locks to each OSTs to be completed.
	 * Glimpse lock converts to normal lock if the server lock is
	 * granted.
+1 −1
Original line number Diff line number Diff line
@@ -929,7 +929,7 @@ static void osc_lock_build_einfo(const struct lu_env *env,
 * Determine if the lock should be converted into a lockless lock.
 *
 * Steps to check:
 * - if the lock has an explicite requirment for a non-lockless lock;
 * - if the lock has an explicit requirement for a non-lockless lock;
 * - if the io lock request type ci_lockreq;
 * - send the enqueue rpc to ost to make the further decision;
 * - special treat to truncate lockless lock
+2 −2
Original line number Diff line number Diff line
@@ -587,7 +587,7 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
 * number of pages to avoid running out of LRU budget, and.. */
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT);  /* 2M */
/* free this number at most otherwise it will take too long time to finsih. */
/* free this number at most otherwise it will take too long time to finish. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */

/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
@@ -606,7 +606,7 @@ static int osc_cache_too_much(struct client_obd *cli)
		return min(pages, lru_shrink_max);

	/* if it's going to run out LRU slots, we should free some, but not
	 * too much to maintain faireness among OSCs. */
	 * too much to maintain fairness among OSCs. */
	if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
		unsigned long tmp;

+1 −1
Original line number Diff line number Diff line
@@ -773,7 +773,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
	osc_pack_capa(req, body, (struct obd_capa *)capa);
	ptlrpc_request_set_replen(req);

	/* If osc_destory is for destroying the unlink orphan,
	/* If osc_destroy is for destroying the unlink orphan,
	 * sent from MDT to OST, which should not be blocked here,
	 * because the process might be triggered by ptlrpcd, and
	 * it is not good to block ptlrpcd thread (b=16006)*/