Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7fab479b authored by Dave Kleikamp, committed by Linus Torvalds
Browse files

[PATCH] JFS: Support page sizes greater than 4K



jfs has never worked on architectures where the page size was not 4K.

Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dc5798d9
Loading
Loading
Loading
Loading
+10 −21
Original line number Diff line number Diff line
@@ -175,31 +175,22 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
{
	s64 lblock64 = lblock;
	int rc = 0;
	int take_locks;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen;

	/*
	 * If this is a special inode (imap, dmap)
	 * the lock should already be taken
	 */
	take_locks = (JFS_IP(ip)->fileset != AGGREGATE_I);

	/*
	 * Take appropriate lock on inode
	 */
	if (take_locks) {
	if (create)
		IWRITE_LOCK(ip);
	else
		IREAD_LOCK(ip);
	}

	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)
	     == 0) && xlen) {
	    (!xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)) &&
	    xlen) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
@@ -258,12 +249,10 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
	/*
	 * Release lock on inode
	 */
	if (take_locks) {
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	}
	return rc;
}

+6 −6
Original line number Diff line number Diff line
@@ -471,6 +471,7 @@ dbUpdatePMap(struct inode *ipbmap,
	struct metapage *mp;
	struct jfs_log *log;
	int lsn, difft, diffp;
	unsigned long flags;

	/* the blocks better be within the mapsize. */
	if (blkno + nblocks > bmp->db_mapsize) {
@@ -504,6 +505,7 @@ dbUpdatePMap(struct inode *ipbmap,
					   0);
			if (mp == NULL)
				return -EIO;
			metapage_wait_for_io(mp);
		}
		dp = (struct dmap *) mp->data;

@@ -578,34 +580,32 @@ dbUpdatePMap(struct inode *ipbmap,
		if (mp->lsn != 0) {
			/* inherit older/smaller lsn */
			logdiff(diffp, mp->lsn, log);
			LOGSYNC_LOCK(log, flags);
			if (difft < diffp) {
				mp->lsn = lsn;

				/* move bp after tblock in logsync list */
				LOGSYNC_LOCK(log);
				list_move(&mp->synclist, &tblk->synclist);
				LOGSYNC_UNLOCK(log);
			}

			/* inherit younger/larger clsn */
			LOGSYNC_LOCK(log);
			logdiff(difft, tblk->clsn, log);
			logdiff(diffp, mp->clsn, log);
			if (difft > diffp)
				mp->clsn = tblk->clsn;
			LOGSYNC_UNLOCK(log);
			LOGSYNC_UNLOCK(log, flags);
		} else {
			mp->log = log;
			mp->lsn = lsn;

			/* insert bp after tblock in logsync list */
			LOGSYNC_LOCK(log);
			LOGSYNC_LOCK(log, flags);

			log->count++;
			list_add(&mp->synclist, &tblk->synclist);

			mp->clsn = tblk->clsn;
			LOGSYNC_UNLOCK(log);
			LOGSYNC_UNLOCK(log, flags);
		}
	}

+7 −7
Original line number Diff line number Diff line
@@ -502,7 +502,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)

	}

	ip->i_mapping->a_ops = &jfs_aops;
	ip->i_mapping->a_ops = &jfs_metapage_aops;
	mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);

	/* Allocations to metadata inodes should not affect quotas */
@@ -2791,6 +2791,7 @@ diUpdatePMap(struct inode *ipimap,
	u32 mask;
	struct jfs_log *log;
	int lsn, difft, diffp;
	unsigned long flags;

	imap = JFS_IP(ipimap)->i_imap;
	/* get the iag number containing the inode */
@@ -2807,6 +2808,7 @@ diUpdatePMap(struct inode *ipimap,
	IREAD_UNLOCK(ipimap);
	if (rc)
		return (rc);
	metapage_wait_for_io(mp);
	iagp = (struct iag *) mp->data;
	/* get the inode number and extent number of the inode within
	 * the iag and the inode number within the extent.
@@ -2870,30 +2872,28 @@ diUpdatePMap(struct inode *ipimap,
		/* inherit older/smaller lsn */
		logdiff(difft, lsn, log);
		logdiff(diffp, mp->lsn, log);
		LOGSYNC_LOCK(log, flags);
		if (difft < diffp) {
			mp->lsn = lsn;
			/* move mp after tblock in logsync list */
			LOGSYNC_LOCK(log);
			list_move(&mp->synclist, &tblk->synclist);
			LOGSYNC_UNLOCK(log);
		}
		/* inherit younger/larger clsn */
		LOGSYNC_LOCK(log);
		assert(mp->clsn);
		logdiff(difft, tblk->clsn, log);
		logdiff(diffp, mp->clsn, log);
		if (difft > diffp)
			mp->clsn = tblk->clsn;
		LOGSYNC_UNLOCK(log);
		LOGSYNC_UNLOCK(log, flags);
	} else {
		mp->log = log;
		mp->lsn = lsn;
		/* insert mp after tblock in logsync list */
		LOGSYNC_LOCK(log);
		LOGSYNC_LOCK(log, flags);
		log->count++;
		list_add(&mp->synclist, &tblk->synclist);
		mp->clsn = tblk->clsn;
		LOGSYNC_UNLOCK(log);
		LOGSYNC_UNLOCK(log, flags);
	}
	write_metapage(mp);
	return (0);
+1 −0
Original line number Diff line number Diff line
@@ -165,6 +165,7 @@ struct jfs_sb_info {
        /* Formerly in ipbmap */
	struct bmap	*bmap;		/* incore bmap descriptor	*/
	struct nls_table *nls_tab;	/* current codepage		*/
	struct inode *direct_inode;	/* metadata inode */
	uint		state;		/* mount/recovery state	*/
	unsigned long	flag;		/* mount time flags */
	uint		p_state;	/* state prior to going no integrity */
+40 −31
Original line number Diff line number Diff line
@@ -234,6 +234,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	int lsn;
	int diffp, difft;
	struct metapage *mp = NULL;
	unsigned long flags;

	jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p",
		 log, tblk, lrd, tlck);
@@ -254,7 +255,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	 */
	lsn = log->lsn;

	LOGSYNC_LOCK(log);
	LOGSYNC_LOCK(log, flags);

	/*
	 * initialize page lsn if first log write of the page
@@ -310,7 +311,7 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
		}
	}

	LOGSYNC_UNLOCK(log);
	LOGSYNC_UNLOCK(log, flags);

	/*
	 *      write the log record
@@ -334,7 +335,6 @@ int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	return lsn;
}


/*
 * NAME:	lmWriteRecord()
 *
@@ -945,6 +945,15 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
	struct lrd lrd;
	int lsn;
	struct logsyncblk *lp;
	struct jfs_sb_info *sbi;
	unsigned long flags;

	/* push dirty metapages out to disk */
	list_for_each_entry(sbi, &log->sb_list, log_list) {
		filemap_flush(sbi->ipbmap->i_mapping);
		filemap_flush(sbi->ipimap->i_mapping);
		filemap_flush(sbi->direct_inode->i_mapping);
	}

	/*
	 *      forward syncpt
@@ -954,10 +963,7 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
	 */

	if (log->sync == log->syncpt) {
		LOGSYNC_LOCK(log);
		/* ToDo: push dirty metapages out to disk */
//              bmLogSync(log);

		LOGSYNC_LOCK(log, flags);
		if (list_empty(&log->synclist))
			log->sync = log->lsn;
		else {
@@ -965,7 +971,7 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
					struct logsyncblk, synclist);
			log->sync = lp->lsn;
		}
		LOGSYNC_UNLOCK(log);
		LOGSYNC_UNLOCK(log, flags);

	}

@@ -974,27 +980,6 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
	 * reset syncpt = sync
	 */
	if (log->sync != log->syncpt) {
		struct jfs_sb_info *sbi;

		/*
		 * We need to make sure all of the "written" metapages
		 * actually make it to disk
		 */
		list_for_each_entry(sbi, &log->sb_list, log_list) {
			if (sbi->flag & JFS_NOINTEGRITY)
				continue;
			filemap_fdatawrite(sbi->ipbmap->i_mapping);
			filemap_fdatawrite(sbi->ipimap->i_mapping);
			filemap_fdatawrite(sbi->sb->s_bdev->bd_inode->i_mapping);
		}
		list_for_each_entry(sbi, &log->sb_list, log_list) {
			if (sbi->flag & JFS_NOINTEGRITY)
				continue;
			filemap_fdatawait(sbi->ipbmap->i_mapping);
			filemap_fdatawait(sbi->ipimap->i_mapping);
			filemap_fdatawait(sbi->sb->s_bdev->bd_inode->i_mapping);
		}

		lrd.logtid = 0;
		lrd.backchain = 0;
		lrd.type = cpu_to_le16(LOG_SYNCPT);
@@ -1547,6 +1532,7 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
{
	int i;
	struct tblock *target = NULL;
	struct jfs_sb_info *sbi;

	/* jfs_write_inode may call us during read-only mount */
	if (!log)
@@ -1608,12 +1594,18 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
	if (wait < 2)
		return;

	list_for_each_entry(sbi, &log->sb_list, log_list) {
		filemap_fdatawrite(sbi->ipbmap->i_mapping);
		filemap_fdatawrite(sbi->ipimap->i_mapping);
		filemap_fdatawrite(sbi->direct_inode->i_mapping);
	}

	/*
	 * If there was recent activity, we may need to wait
	 * for the lazycommit thread to catch up
	 */
	if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
		for (i = 0; i < 800; i++) {	/* Too much? */
		for (i = 0; i < 200; i++) {	/* Too much? */
			msleep(250);
			if (list_empty(&log->cqueue) &&
			    list_empty(&log->synclist))
@@ -1621,7 +1613,24 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
		}
	}
	assert(list_empty(&log->cqueue));
	assert(list_empty(&log->synclist));
	if (!list_empty(&log->synclist)) {
		struct logsyncblk *lp;

		list_for_each_entry(lp, &log->synclist, synclist) {
			if (lp->xflag & COMMIT_PAGE) {
				struct metapage *mp = (struct metapage *)lp;
				dump_mem("orphan metapage", lp,
					 sizeof(struct metapage));
				dump_mem("page", mp->page, sizeof(struct page));
			}
			else
				dump_mem("orphan tblock", lp,
					 sizeof(struct tblock));
		}
//		current->state = TASK_INTERRUPTIBLE;
//		schedule();
	}
	//assert(list_empty(&log->synclist));
	clear_bit(log_FLUSH, &log->flag);
}

Loading