fs/jfs/inode.c  +4 −0

@@ -128,6 +128,10 @@ void jfs_delete_inode(struct inode *inode)
 {
 	jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
 
+	if (is_bad_inode(inode) ||
+	    (JFS_IP(inode)->fileset != cpu_to_le32(FILESYSTEM_I)))
+		return;
+
 	if (test_cflag(COMMIT_Freewmap, inode))
 		jfs_free_zero_link(inode);
fs/jfs/jfs_logmgr.c  +19 −17

@@ -191,7 +191,7 @@ static int lbmIOWait(struct lbuf * bp, int flag);
 static bio_end_io_t lbmIODone;
 static void lbmStartIO(struct lbuf * bp);
 static void lmGCwrite(struct jfs_log * log, int cant_block);
-static int lmLogSync(struct jfs_log * log, int nosyncwait);
+static int lmLogSync(struct jfs_log * log, int hard_sync);

@@ -915,19 +915,17 @@ static void lmPostGC(struct lbuf * bp)
  *	if new sync address is available
  *	(normally the case if sync() is executed by back-ground
  *	process).
- *	if not, explicitly run jfs_blogsync() to initiate
- *	getting of new sync address.
  *	calculate new value of i_nextsync which determines when
  *	this code is called again.
  *
  * PARAMETERS:	log	- log structure
- *		nosyncwait - 1 if called asynchronously
+ *		hard_sync - 1 to force all metadata to be written
  *
  * RETURN:	0
  *
  * serialization: LOG_LOCK() held on entry/exit
  */
-static int lmLogSync(struct jfs_log * log, int nosyncwait)
+static int lmLogSync(struct jfs_log * log, int hard_sync)
 {
 	int logsize;
 	int written;	/* written since last syncpt */

@@ -941,6 +939,13 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
 	unsigned long flags;
 
 	/* push dirty metapages out to disk */
+	if (hard_sync)
+		list_for_each_entry(sbi, &log->sb_list, log_list) {
+			filemap_fdatawrite(sbi->ipbmap->i_mapping);
+			filemap_fdatawrite(sbi->ipimap->i_mapping);
+			filemap_fdatawrite(sbi->direct_inode->i_mapping);
+		}
+	else
 	list_for_each_entry(sbi, &log->sb_list, log_list) {
 		filemap_flush(sbi->ipbmap->i_mapping);
 		filemap_flush(sbi->ipimap->i_mapping);

@@ -1021,10 +1026,6 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
 	/* next syncpt trigger = written + more */
 	log->nextsync = written + more;
 
-	/* return if lmLogSync() from outside of transaction, e.g., sync() */
-	if (nosyncwait)
-		return lsn;
-
 	/* if number of bytes written from last sync point is more
 	 * than 1/4 of the log size, stop new transactions from
 	 * starting until all current transactions are completed

@@ -1050,10 +1051,11 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
  * FUNCTION:	write log SYNCPT record for specified log
  *
  * PARAMETERS:	log	- log structure
+ *		hard_sync - set to 1 to force metadata to be written
  */
-void jfs_syncpt(struct jfs_log *log)
+void jfs_syncpt(struct jfs_log *log, int hard_sync)
 {
 	LOG_LOCK(log);
-	lmLogSync(log, 1);
+	lmLogSync(log, hard_sync);
 	LOG_UNLOCK(log);
 }
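The difference between the two branches of the new hard_sync test comes down to the writeback mode. As background only (this is generic mm code, paraphrased from memory, not part of this patch), both helpers are thin wrappers around the same routine in mm/filemap.c, roughly:

	/* mm/filemap.c, paraphrased for context; not part of this diff */
	int filemap_fdatawrite(struct address_space *mapping)
	{
		/* integrity mode: queue write-out of every dirty page */
		return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
	}

	int filemap_flush(struct address_space *mapping)
	{
		/* best-effort background write-out; may skip pages */
		return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
	}

So a hard sync point pushes all dirty metadata pages of the block map, inode map and direct inode out before the SYNCPT record is logged, while the lazy form only nudges background writeback.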
fs/jfs/jfs_logmgr.h  +1 −1

@@ -510,6 +510,6 @@ extern int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize);
 extern int lmGroupCommit(struct jfs_log *, struct tblock *);
 extern int jfsIOWait(void *);
 extern void jfs_flush_journal(struct jfs_log * log, int wait);
-extern void jfs_syncpt(struct jfs_log *log);
+extern void jfs_syncpt(struct jfs_log *log, int hard_sync);
 
 #endif	/* _H_JFS_LOGMGR */
fs/jfs/jfs_txnmgr.c  +7 −5

@@ -552,6 +552,11 @@ void txEnd(tid_t tid)
 		 * synchronize with logsync barrier
 		 */
 		if (test_bit(log_SYNCBARRIER, &log->flag)) {
+			TXN_UNLOCK();
+
+			/* write dirty metadata & forward log syncpt */
+			jfs_syncpt(log, 1);
+
 			jfs_info("log barrier off: 0x%x", log->lsn);
 
 			/* enable new transactions start */

@@ -560,11 +565,6 @@ void txEnd(tid_t tid)
 			/* wakeup all waitors for logsync barrier */
 			TXN_WAKEUP(&log->syncwait);
 
-			TXN_UNLOCK();
-
-			/* forward log syncpt */
-			jfs_syncpt(log);
-
 			goto wakeup;
 		}
 	}

@@ -657,7 +657,9 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
 			/* only anonymous txn.
 			 * Remove from anon_list
 			 */
+			TXN_LOCK();
 			list_del_init(&jfs_ip->anon_inode_list);
+			TXN_UNLOCK();
 		}
 		jfs_ip->atlhead = tlck->next;
 	} else {
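Reading the first two txEnd() hunks together: the sync point moves from after the barrier is lifted to before it, and it becomes a hard sync, so dirty metadata reaches disk before new transactions are allowed to start. A sketch of the resulting barrier path, reconstructed from the hunks above; the clear_bit() step sits between the two visible hunks and is our assumption, and the rationale comments are ours as well:

	if (test_bit(log_SYNCBARRIER, &log->flag)) {
		TXN_UNLOCK();	/* drop the txn lock before doing I/O */

		/* write dirty metadata & forward log syncpt */
		jfs_syncpt(log, 1);

		jfs_info("log barrier off: 0x%x", log->lsn);

		/* enable new transactions start
		 * (assumed: this line is collapsed in the diff view) */
		clear_bit(log_SYNCBARRIER, &log->flag);

		/* wakeup all waitors for logsync barrier */
		TXN_WAKEUP(&log->syncwait);

		goto wakeup;
	}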
fs/jfs/super.c  +3 −1

@@ -114,6 +114,8 @@ static void jfs_destroy_inode(struct inode *inode)
 {
 	struct jfs_inode_info *ji = JFS_IP(inode);
 
+	BUG_ON(!list_empty(&ji->anon_inode_list));
+
 	spin_lock_irq(&ji->ag_lock);
 	if (ji->active_ag != -1) {
 		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;

@@ -531,7 +533,7 @@ static int jfs_sync_fs(struct super_block *sb, int wait)
 	/* log == NULL indicates read-only mount */
 	if (log) {
 		jfs_flush_journal(log, wait);
-		jfs_syncpt(log);
+		jfs_syncpt(log, 0);
 	}
 
 	return 0;
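Two observations on this last file, both our reading rather than anything the patch states: the BUG_ON() in jfs_destroy_inode() asserts the invariant that the txLock() locking fix above protects (an inode must be off anon_inode_list before it is destroyed), and jfs_sync_fs() keeps the lazy form of the sync point because jfs_flush_journal() has already pushed the journal and the VFS sync path writes dirty pages itself. For reference, jfs_sync_fs() after this change, reconstructed from the hunk above; the declaration of log is not visible in the hunk, so JFS_SBI(sb)->log is our assumption:

	static int jfs_sync_fs(struct super_block *sb, int wait)
	{
		struct jfs_log *log = JFS_SBI(sb)->log;	/* assumed, not in the hunk */

		/* log == NULL indicates read-only mount */
		if (log) {
			jfs_flush_journal(log, wait);
			jfs_syncpt(log, 0);	/* lazy sync point: background flush only */
		}

		return 0;
	}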