
Commit 3af91a12 authored by Linus Torvalds

Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
  UBIFS: fix-up free space earlier
  UBIFS: intialize LPT earlier
  UBIFS: assert no fixup when writing a node
  UBIFS: fix clean znode counter corruption in error cases
  UBIFS: fix memory leak on error path
  UBIFS: fix shrinker object count reports
  UBIFS: fix recovery broken by the previous recovery fix
  UBIFS: amend ubifs_recover_leb interface
  UBIFS: introduce a "grouped" journal head flag
  UBIFS: supress false error messages
parents 9a44fde3 09801194
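
The common thread in these patches is the amended ubifs_recover_leb() interface. The sketch below is editorial, condensed from the hunks on this page rather than quoted verbatim from the tree, and is only meant to make the per-file changes easier to follow:

/* Before: the caller passed a boolean saying whether nodes are grouped. */
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
					 int offs, void *sbuf, int grouped);

/* After: the caller passes the journal head number instead (-1 if the LEB
 * does not belong to any journal head); recovery derives the grouping flag
 * itself from the new per-head "grouped" flag:
 */
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
					 int offs, void *sbuf, int jhead);
/* ...and inside ubifs_recover_leb(): */
int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;

/* Callers updated below:
 *   kill_orphans()          -> jhead = -1 (orphan LEBs, no journal head)
 *   ubifs_recover_log_leb() -> jhead = -1 (log LEBs, no journal head)
 *   replay_bud()            -> jhead = b->bud->jhead
 */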
fs/ubifs/io.c +2 −0
@@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 	ubifs_assert(wbuf->size % c->min_io_size == 0);
 	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
 	ubifs_assert(!c->ro_media && !c->ro_mount);
+	ubifs_assert(!c->space_fixup);
 	if (c->leb_size - wbuf->offs >= c->max_write_size)
 		ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
 
@@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
 	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
 	ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
 	ubifs_assert(!c->ro_media && !c->ro_mount);
+	ubifs_assert(!c->space_fixup);
 
 	if (c->ro_error)
 		return -EROFS;
fs/ubifs/journal.c +1 −0
@@ -669,6 +669,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 
 out_release:
 	release_head(c, BASEHD);
+	kfree(dent);
 out_ro:
 	ubifs_ro_mode(c, err);
 	if (last_reference)
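
The kfree(dent) added above is the "fix memory leak on error path" change: dent is allocated near the top of ubifs_jnl_update() and was previously freed only on the success path, so failures jumping to out_release leaked it. A minimal sketch of the pattern (the allocation and the elided parts are paraphrased, not part of this hunk):

int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, ...)
{
	struct ubifs_dent_node *dent;

	dent = kmalloc(len, GFP_NOFS);	/* allocated early (paraphrased) */
	...
	kfree(dent);			/* success path frees it */
	return 0;

out_release:
	release_head(c, BASEHD);
	kfree(dent);			/* the fix: error path frees it too */
out_ro:
	ubifs_ro_mode(c, err);
	...
	return err;
}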
fs/ubifs/orphan.c +1 −1
@@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c)
 		if (IS_ERR(sleb)) {
 			if (PTR_ERR(sleb) == -EUCLEAN)
 				sleb = ubifs_recover_leb(c, lnum, 0,
-							 c->sbuf, 0);
+							 c->sbuf, -1);
 			if (IS_ERR(sleb)) {
 				err = PTR_ERR(sleb);
 				break;
fs/ubifs/recovery.c +94 −70
@@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 }
 
 /**
- * drop_last_node - drop the last node or group of nodes.
+ * drop_last_group - drop the last group of nodes.
  * @sleb: scanned LEB information
  * @offs: offset of dropped nodes is returned here
- * @grouped: non-zero if whole group of nodes have to be dropped
  *
  * This is a helper function for 'ubifs_recover_leb()' which drops the last
- * node of the scanned LEB or the last group of nodes if @grouped is not zero.
- * This function returns %1 if a node was dropped and %0 otherwise.
+ * group of nodes of the scanned LEB.
  */
-static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
+static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
 {
-	int dropped = 0;
-
 	while (!list_empty(&sleb->nodes)) {
 		struct ubifs_scan_node *snod;
 		struct ubifs_ch *ch;
@@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
 				  list);
 		ch = snod->node;
 		if (ch->group_type != UBIFS_IN_NODE_GROUP)
-			return dropped;
-		dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs);
+			break;
+
+		dbg_rcvry("dropping grouped node at %d:%d",
+			  sleb->lnum, snod->offs);
 		*offs = snod->offs;
 		list_del(&snod->list);
 		kfree(snod);
 		sleb->nodes_cnt -= 1;
-		dropped = 1;
-		if (!grouped)
-			break;
 	}
-	return dropped;
+}
+
+/**
+ * drop_last_node - drop the last node.
+ * @sleb: scanned LEB information
+ * @offs: offset of dropped nodes is returned here
+ * @grouped: non-zero if whole group of nodes have to be dropped
+ *
+ * This is a helper function for 'ubifs_recover_leb()' which drops the last
+ * node of the scanned LEB.
+ */
+static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
+{
+	struct ubifs_scan_node *snod;
+
+	if (!list_empty(&sleb->nodes)) {
+		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
+				  list);
+
+		dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
+		*offs = snod->offs;
+		list_del(&snod->list);
+		kfree(snod);
+		sleb->nodes_cnt -= 1;
+	}
 }
 
 /**
@@ -604,7 +623,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
  * @lnum: LEB number
  * @offs: offset
  * @sbuf: LEB-sized buffer to use
- * @grouped: nodes may be grouped for recovery
+ * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
+ *         belong to any journal head)
  *
  * This function does a scan of a LEB, but caters for errors that might have
  * been caused by the unclean unmount from which we are attempting to recover.
@@ -612,13 +632,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
  * found, and a negative error code in case of failure.
  */
 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
-					 int offs, void *sbuf, int grouped)
+					 int offs, void *sbuf, int jhead)
 {
 	int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
+	int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
 	struct ubifs_scan_leb *sleb;
 	void *buf = sbuf + offs;
 
-	dbg_rcvry("%d:%d", lnum, offs);
+	dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
 
 	sleb = ubifs_start_scan(c, lnum, offs, sbuf);
 	if (IS_ERR(sleb))
@@ -635,7 +656,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
 		 * Scan quietly until there is an error from which we cannot
 		 * recover
 		 */
-		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
+		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
 		if (ret == SCANNED_A_NODE) {
 			/* A valid node, and not a padding node */
 			struct ubifs_ch *ch = buf;
@@ -695,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
 		 * If nodes are grouped, always drop the incomplete group at
 		 * the end.
 		 */
-		drop_last_node(sleb, &offs, 1);
+		drop_last_group(sleb, &offs);
 
-	/*
-	 * While we are in the middle of the same min. I/O unit keep dropping
-	 * nodes. So basically, what we want is to make sure that the last min.
-	 * I/O unit where we saw the corruption is dropped completely with all
-	 * the uncorrupted node which may possibly sit there.
-	 *
-	 * In other words, let's name the min. I/O unit where the corruption
-	 * starts B, and the previous min. I/O unit A. The below code tries to
-	 * deal with a situation when half of B contains valid nodes or the end
-	 * of a valid node, and the second half of B contains corrupted data or
-	 * garbage. This means that UBIFS had been writing to B just before the
-	 * power cut happened. I do not know how realistic is this scenario
-	 * that half of the min. I/O unit had been written successfully and the
-	 * other half not, but this is possible in our 'failure mode emulation'
-	 * infrastructure at least.
-	 *
-	 * So what is the problem, why we need to drop those nodes? Whey can't
-	 * we just clean-up the second half of B by putting a padding node
-	 * there? We can, and this works fine with one exception which was
-	 * reproduced with power cut emulation testing and happens extremely
-	 * rarely. The description follows, but it is worth noting that that is
-	 * only about the GC head, so we could do this trick only if the bud
-	 * belongs to the GC head, but it does not seem to be worth an
-	 * additional "if" statement.
-	 *
-	 * So, imagine the file-system is full, we run GC which is moving valid
-	 * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head
-	 * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X
-	 * and will try to continue. Imagine that LEB X is currently the
-	 * dirtiest LEB, and the amount of used space in LEB Y is exactly the
-	 * same as amount of free space in LEB X.
-	 *
-	 * And a power cut happens when nodes are moved from LEB X to LEB Y. We
-	 * are here trying to recover LEB Y which is the GC head LEB. We find
-	 * the min. I/O unit B as described above. Then we clean-up LEB Y by
-	 * padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function
-	 * fails, because it cannot find a dirty LEB which could be GC'd into
-	 * LEB Y! Even LEB X does not match because the amount of valid nodes
-	 * there does not fit the free space in LEB Y any more! And this is
-	 * because of the padding node which we added to LEB Y. The
-	 * user-visible effect of this which I once observed and analysed is
-	 * that we cannot mount the file-system with -ENOSPC error.
-	 *
-	 * So obviously, to make sure that situation does not happen we should
-	 * free min. I/O unit B in LEB Y completely and the last used min. I/O
-	 * unit in LEB Y should be A. This is basically what the below code
-	 * tries to do.
-	 */
-	while (min_io_unit == round_down(offs, c->min_io_size) &&
-	       min_io_unit != offs &&
-	       drop_last_node(sleb, &offs, grouped));
+	if (jhead == GCHD) {
+		/*
+		 * If this LEB belongs to the GC head then while we are in the
+		 * middle of the same min. I/O unit keep dropping nodes. So
+		 * basically, what we want is to make sure that the last min.
+		 * I/O unit where we saw the corruption is dropped completely
+		 * with all the uncorrupted nodes which may possibly sit there.
+		 *
+		 * In other words, let's name the min. I/O unit where the
+		 * corruption starts B, and the previous min. I/O unit A. The
+		 * below code tries to deal with a situation when half of B
+		 * contains valid nodes or the end of a valid node, and the
+		 * second half of B contains corrupted data or garbage. This
+		 * means that UBIFS had been writing to B just before the power
+		 * cut happened. I do not know how realistic is this scenario
+		 * that half of the min. I/O unit had been written successfully
+		 * and the other half not, but this is possible in our 'failure
+		 * mode emulation' infrastructure at least.
+		 *
+		 * So what is the problem, why we need to drop those nodes? Why
+		 * can't we just clean-up the second half of B by putting a
+		 * padding node there? We can, and this works fine with one
+		 * exception which was reproduced with power cut emulation
+		 * testing and happens extremely rarely.
+		 *
+		 * Imagine the file-system is full, we run GC which starts
+		 * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
+		 * the current GC head LEB). The @c->gc_lnum is -1, which means
+		 * that GC will retain LEB X and will try to continue. Imagine
+		 * that LEB X is currently the dirtiest LEB, and the amount of
+		 * used space in LEB Y is exactly the same as amount of free
+		 * space in LEB X.
+		 *
+		 * And a power cut happens when nodes are moved from LEB X to
+		 * LEB Y. We are here trying to recover LEB Y which is the GC
+		 * head LEB. We find the min. I/O unit B as described above.
+		 * Then we clean-up LEB Y by padding min. I/O unit. And later
+		 * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
+		 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
+		 * does not match because the amount of valid nodes there does
+		 * not fit the free space in LEB Y any more! And this is
+		 * because of the padding node which we added to LEB Y. The
+		 * user-visible effect of this which I once observed and
+		 * analysed is that we cannot mount the file-system with
+		 * -ENOSPC error.
+		 *
+		 * So obviously, to make sure that situation does not happen we
+		 * should free min. I/O unit B in LEB Y completely and the last
+		 * used min. I/O unit in LEB Y should be A. This is basically
+		 * what the below code tries to do.
+		 */
+		while (offs > min_io_unit)
+			drop_last_node(sleb, &offs);
+	}
 
 	buf = sbuf + offs;
 	len = c->leb_size - offs;
@@ -881,7 +905,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
 		}
 		ubifs_scan_destroy(sleb);
 	}
-	return ubifs_recover_leb(c, lnum, offs, sbuf, 0);
+	return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
 }
 
 /**
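
To attach numbers to the long comment above: suppose c->min_io_size is 2048 and the scan hit corruption at offset 5000. An illustrative fragment (values invented; min_io_unit is computed earlier in ubifs_recover_leb() with the kernel's round_down() helper):

min_io_unit = round_down(5000, 2048);	/* = 4096, start of unit "B" */

/* On the GC head (jhead == GCHD), whole nodes are dropped until nothing
 * of unit B remains in use, so the last used min. I/O unit is "A"
 * (bytes 2048..4095) and 'ubifs_rcvry_gc_commit()' can still GC into
 * this LEB:
 */
while (offs > min_io_unit)
	drop_last_node(sleb, &offs);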
fs/ubifs/replay.c +1 −2
@@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
 		 * these LEBs could possibly be written to at the power cut
 		 * time.
 		 */
-		sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf,
-					 b->bud->jhead != GCHD);
+		sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
 	else
 		sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
 	if (IS_ERR(sleb))
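
Read together with the recovery.c changes, this hunk removes a conflation: the old boolean argument b->bud->jhead != GCHD encoded both "nodes are grouped" and "this bud is not the GC head" in one flag. A condensed before/after of the call (editorial, taken from the hunk above):

/* before: boolean hint */
sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf,
			 b->bud->jhead != GCHD);

/* after: pass the journal head; recovery consults
 * c->jheads[jhead].grouped and checks jhead == GCHD itself
 */
sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);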