From 9abbc539bf7f299819ad0a235064a1b643ab6407 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 13 Apr 2010 15:06:46 +1000 Subject: xfs: add log item recovery tracing Currently there is no tracing in log recovery, so it is difficult to determine what is going on when something goes wrong. Add tracing for log item recovery to provide visibility into the log recovery process. The tracing added shows regions being extracted from the log transactions and added to the transaction hash forming recovery items, followed by the reordering, cancelling and finally recovery of the items. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log_recover.c | 44 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 8 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 22e6efdc17e..f21eb8ad2d9 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1408,6 +1408,7 @@ xlog_recover_add_item( STATIC int xlog_recover_add_to_cont_trans( + struct log *log, xlog_recover_t *trans, xfs_caddr_t dp, int len) @@ -1434,6 +1435,7 @@ xlog_recover_add_to_cont_trans( memcpy(&ptr[old_len], dp, len); /* d, s, l */ item->ri_buf[item->ri_cnt-1].i_len += len; item->ri_buf[item->ri_cnt-1].i_addr = ptr; + trace_xfs_log_recover_item_add_cont(log, trans, item, 0); return 0; } @@ -1452,6 +1454,7 @@ xlog_recover_add_to_cont_trans( */ STATIC int xlog_recover_add_to_trans( + struct log *log, xlog_recover_t *trans, xfs_caddr_t dp, int len) @@ -1510,6 +1513,7 @@ xlog_recover_add_to_trans( item->ri_buf[item->ri_cnt].i_addr = ptr; item->ri_buf[item->ri_cnt].i_len = len; item->ri_cnt++; + trace_xfs_log_recover_item_add(log, trans, item, 0); return 0; } @@ -1521,7 +1525,9 @@ xlog_recover_add_to_trans( */ STATIC int xlog_recover_reorder_trans( - xlog_recover_t *trans) + struct log *log, + xlog_recover_t *trans, + int pass) { xlog_recover_item_t *item, *n; LIST_HEAD(sort_list); @@ -1535,6 +1541,8 @@ xlog_recover_reorder_trans( switch (ITEM_TYPE(item)) { case XFS_LI_BUF: if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) { + trace_xfs_log_recover_item_reorder_head(log, + trans, item, pass); list_move(&item->ri_list, &trans->r_itemq); break; } @@ -1543,6 +1551,8 @@ xlog_recover_reorder_trans( case XFS_LI_QUOTAOFF: case XFS_LI_EFD: case XFS_LI_EFI: + trace_xfs_log_recover_item_reorder_tail(log, + trans, item, pass); list_move_tail(&item->ri_list, &trans->r_itemq); break; default: @@ -1592,8 +1602,10 @@ xlog_recover_do_buffer_pass1( /* * If this isn't a cancel buffer item, then just return. 
*/ - if (!(flags & XFS_BLI_CANCEL)) + if (!(flags & XFS_BLI_CANCEL)) { + trace_xfs_log_recover_buf_not_cancel(log, buf_f); return; + } /* * Insert an xfs_buf_cancel record into the hash table of @@ -1627,6 +1639,7 @@ xlog_recover_do_buffer_pass1( while (nextp != NULL) { if (nextp->bc_blkno == blkno && nextp->bc_len == len) { nextp->bc_refcount++; + trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f); return; } prevp = nextp; @@ -1640,6 +1653,7 @@ xlog_recover_do_buffer_pass1( bcp->bc_refcount = 1; bcp->bc_next = NULL; prevp->bc_next = bcp; + trace_xfs_log_recover_buf_cancel_add(log, buf_f); } /* @@ -1779,6 +1793,8 @@ xlog_recover_do_inode_buffer( unsigned int *data_map = NULL; unsigned int map_size = 0; + trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); + switch (buf_f->blf_type) { case XFS_LI_BUF: data_map = buf_f->blf_data_map; @@ -1874,6 +1890,7 @@ xlog_recover_do_inode_buffer( /*ARGSUSED*/ STATIC void xlog_recover_do_reg_buffer( + struct xfs_mount *mp, xlog_recover_item_t *item, xfs_buf_t *bp, xfs_buf_log_format_t *buf_f) @@ -1885,6 +1902,8 @@ xlog_recover_do_reg_buffer( unsigned int map_size = 0; int error; + trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); + switch (buf_f->blf_type) { case XFS_LI_BUF: data_map = buf_f->blf_data_map; @@ -2083,6 +2102,8 @@ xlog_recover_do_dquot_buffer( { uint type; + trace_xfs_log_recover_buf_dquot_buf(log, buf_f); + /* * Filesystems are required to send in quota flags at mount time. */ @@ -2103,7 +2124,7 @@ xlog_recover_do_dquot_buffer( if (log->l_quotaoffs_flag & type) return; - xlog_recover_do_reg_buffer(item, bp, buf_f); + xlog_recover_do_reg_buffer(mp, item, bp, buf_f); } /* @@ -2164,9 +2185,11 @@ xlog_recover_do_buffer_trans( */ cancel = xlog_recover_do_buffer_pass2(log, buf_f); if (cancel) { + trace_xfs_log_recover_buf_cancel(log, buf_f); return 0; } } + trace_xfs_log_recover_buf_recover(log, buf_f); switch (buf_f->blf_type) { case XFS_LI_BUF: blkno = buf_f->blf_blkno; @@ -2204,7 +2227,7 @@ xlog_recover_do_buffer_trans( (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); } else { - xlog_recover_do_reg_buffer(item, bp, buf_f); + xlog_recover_do_reg_buffer(mp, item, bp, buf_f); } if (error) return XFS_ERROR(error); @@ -2284,8 +2307,10 @@ xlog_recover_do_inode_trans( if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno, in_f->ilf_len, 0)) { error = 0; + trace_xfs_log_recover_inode_cancel(log, in_f); goto error; } + trace_xfs_log_recover_inode_recover(log, in_f); bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, XBF_LOCK); @@ -2337,6 +2362,7 @@ xlog_recover_do_inode_trans( /* do nothing */ } else { xfs_buf_relse(bp); + trace_xfs_log_recover_inode_skip(log, in_f); error = 0; goto error; } @@ -2758,11 +2784,12 @@ xlog_recover_do_trans( int error = 0; xlog_recover_item_t *item; - error = xlog_recover_reorder_trans(trans); + error = xlog_recover_reorder_trans(log, trans, pass); if (error) return error; list_for_each_entry(item, &trans->r_itemq, ri_list) { + trace_xfs_log_recover_item_recover(log, trans, item, pass); switch (ITEM_TYPE(item)) { case XFS_LI_BUF: error = xlog_recover_do_buffer_trans(log, item, pass); @@ -2919,8 +2946,9 @@ xlog_recover_process_data( error = xlog_recover_unmount_trans(trans); break; case XLOG_WAS_CONT_TRANS: - error = xlog_recover_add_to_cont_trans(trans, - dp, be32_to_cpu(ohead->oh_len)); + error = xlog_recover_add_to_cont_trans(log, + trans, dp, + be32_to_cpu(ohead->oh_len)); break; case XLOG_START_TRANS: xlog_warn( @@ -2930,7 
+2958,7 @@ xlog_recover_process_data( break; case 0: case XLOG_CONTINUE_TRANS: - error = xlog_recover_add_to_trans(trans, + error = xlog_recover_add_to_trans(log, trans, dp, be32_to_cpu(ohead->oh_len)); break; default: -- cgit From 6881a229f66f74e4e0a73504389695213987955b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 13 Apr 2010 15:22:29 +1000 Subject: xfs: fix min bufsize bugs in two places This fixes a bug in two places that I found by inspection. In xlog_find_verify_cycle() and xlog_write_log_records(), the code attempts to allocate a buffer to hold as many blocks as possible. It gives up if the number of blocks to be allocated gets too small. Right now it uses log->l_sectbb_log as that lower bound, but I'm sure it's supposed to be the actual log sector size instead. That is, the lower bound should be (1 << log->l_sectbb_log). Also define a simple macro xlog_sectbb(log) to represent the number of basic blocks in a sector for the given log. (No change from original submission; I have implemented Christoph's suggestion about storing l_sectsize rather than l_sectbb_log in a new, separate patch in this series.) Signed-off-by: Alex Elder Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner --- fs/xfs/xfs_log_recover.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index f21eb8ad2d9..0d81a909255 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -66,6 +66,9 @@ STATIC void xlog_recover_check_summary(xlog_t *); ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) ) #define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask) +/* Number of basic blocks in a log sector */ +#define xlog_sectbb(log) (1 << (log)->l_sectbb_log) + STATIC xfs_buf_t * xlog_get_bp( xlog_t *log, @@ -376,12 +379,16 @@ xlog_find_verify_cycle( xfs_caddr_t buf = NULL; int error = 0; + /* + * Greedily allocate a buffer big enough to handle the full + * range of basic blocks we'll be examining. If that fails, + * try a smaller size. We need to be able to read at least + * a log sector, or we're out of luck. + */ bufblks = 1 << ffs(nbblks); - while (!(bp = xlog_get_bp(log, bufblks))) { - /* can't get enough memory to do everything in one big buffer */ bufblks >>= 1; - if (bufblks <= log->l_sectbb_log) + if (bufblks < xlog_sectbb(log)) return ENOMEM; } @@ -1158,10 +1165,16 @@ xlog_write_log_records( int error = 0; int i, j = 0; + /* + * Greedily allocate a buffer big enough to handle the full + * range of basic blocks to be written. If that fails, try + * a smaller size. We need to be able to write at least a + * log sector, or we're out of luck. + */ bufblks = 1 << ffs(blocks); while (!(bp = xlog_get_bp(log, bufblks))) { bufblks >>= 1; - if (bufblks <= log->l_sectbb_log) + if (bufblks < xlog_sectbb(log)) return ENOMEM; } -- cgit From 8511998baaf541710f457315958cef0d0a7864a1 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 13 Apr 2010 15:22:40 +1000 Subject: xfs: simplify XLOG_SECTOR_ROUND*() XLOG_SECTOR_ROUNDUP_BBCOUNT() is defined in "fs/xfs/xfs_log_recover.c" in an overly-complicated way. It is basically roundup(), but that is not at all clear from its definition. (Actually, there is another macro round_up() that applies for power-of-two-based masks which I'll be using here.) The operands in XLOG_SECTOR_ROUNDUP_BBCOUNT() are basically the block number (bbs) and the log sector basic block mask (log->l_sectbb_mask). 
I'll call them B and M for this discussion. The macro computes is value this way: M && (B & M) ? (B + M + 1) & ~M : B Put another way, we can break it into 3 cases: 1) ! M -> B # 0 mask, no effect 2) ! (B & M) -> B # sector aligned 3) M && (B & M) -> (B + M + 1) & ~M # round up otherwise The round_up() macro is cleverly defined using a value, v, and a power-of-2, p, and the result is the nearest multiple of p greater than or equal to v. Its value is computed something like this: ((v - 1) | (p - 1)) + 1 Let's consider using this in the context of the 3 cases above. When p = 2^0 = 1, the result boils down to ((v - 1) | 0) + 1, so it just translates any value v to itself. That handles case (1) above. When p = 2^n, n > 0, we know that (p - 1) will be a mask with all n bits 0..n-1 set. The condition in this case occurs when none of those mask bits is set in the value v provided. If that is the case, subtracting 1 from v will have 1's in all those lower bits (at least). Therefore, OR-ing the mask with that decremented value has no effect, so adding the 1 back again will just translate the v to itself. This handles case (2). Otherwise, the value v is greater than some multiple of p, and decrementing it will produce a result greater than or equal to that multiple. OR-ing in the mask will produce a value 1 less than the next multiple of p, so finally adding 1 back will result in the desired rounded-up value. This handles case (3). Hopefully this is convincing. While I was at it, I converted XLOG_SECTOR_ROUNDDOWN_BLKNO() to use the round_down() macro. Signed-off-by: Alex Elder Reviewed-by: Christoph Hellwig Signed-off-by: Dave Chinner --- fs/xfs/xfs_log_recover.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 0d81a909255..2813a6ef15b 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -61,14 +61,13 @@ STATIC void xlog_recover_check_summary(xlog_t *); * Sector aligned buffer routines for buffer create/read/write/access */ -#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs) \ - ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \ - ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) ) -#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask) - /* Number of basic blocks in a log sector */ #define xlog_sectbb(log) (1 << (log)->l_sectbb_log) +#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs) round_up((bbs), xlog_sectbb(log)) +#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) \ + round_down((bno), xlog_sectbb(log)) + STATIC xfs_buf_t * xlog_get_bp( xlog_t *log, -- cgit From 5c17f5339f9dfdee8ad9661e97f8030d75b6bff7 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 13 Apr 2010 15:22:48 +1000 Subject: xfs: kill XLOG_SECTOR_ROUND*() XLOG_SECTOR_ROUNDUP_BBCOUNT() and XLOG_SECTOR_ROUNDDOWN_BLKNO() are now fairly simple macro translations. Just get rid of them in favor of the round_up() and round_down() macro calls they represent. Also, in spots in xlog_get_bp() and xlog_write_log_records(), round_up() was being called with value 1, which just evaluates to the macro's second argument; so just use that instead. In the latter case, make use of that value, as long as it's already been computed. 
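To make the rounding behaviour discussed in this and the previous patch concrete, here is a minimal standalone sketch (illustration only, not part of the patch) that writes out the same power-of-two arithmetic as the kernel's round_up()/round_down() and checks the three cases above, including the p == 1 identity that makes round_up(x, 1) collapse to x:

#include <assert.h>
#include <stdio.h>

/*
 * Standalone definitions mirroring the kernel's round_up()/round_down()
 * arithmetic for a power-of-two 'p', as described above.
 */
#define round_up(v, p)		((((v) - 1) | ((p) - 1)) + 1)
#define round_down(v, p)	((v) & ~((p) - 1))

int main(void)
{
	/* Case 1: p == 1 (one basic block per sector) is an identity. */
	assert(round_up(37, 1) == 37 && round_down(37, 1) == 37);

	/* Case 2: already-aligned values are returned unchanged. */
	assert(round_up(64, 8) == 64 && round_down(64, 8) == 64);

	/* Case 3: unaligned values move to the surrounding sector boundaries. */
	assert(round_up(65, 8) == 72 && round_down(65, 8) == 64);

	printf("round_up()/round_down() behave as described\n");
	return 0;
}

That last property is what allows a later patch in this series to drop the explicit l_sectbb_log checks around the rounding calls when the log uses one-basic-block sectors.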
Signed-off-by: Alex Elder Reviewed-by: Christoph Hellwig Signed-off-by: Dave Chinner --- fs/xfs/xfs_log_recover.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 2813a6ef15b..0e51bdd910a 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -56,7 +56,6 @@ STATIC void xlog_recover_check_summary(xlog_t *); #define xlog_recover_check_summary(log) #endif - /* * Sector aligned buffer routines for buffer create/read/write/access */ @@ -64,10 +63,6 @@ STATIC void xlog_recover_check_summary(xlog_t *); /* Number of basic blocks in a log sector */ #define xlog_sectbb(log) (1 << (log)->l_sectbb_log) -#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs) round_up((bbs), xlog_sectbb(log)) -#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) \ - round_down((bno), xlog_sectbb(log)) - STATIC xfs_buf_t * xlog_get_bp( xlog_t *log, @@ -82,8 +77,8 @@ xlog_get_bp( if (log->l_sectbb_log) { if (nbblks > 1) - nbblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1); - nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks); + nbblks += xlog_sectbb(log); + nbblks = round_up(nbblks, xlog_sectbb(log)); } return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp); } @@ -134,8 +129,8 @@ xlog_bread_noalign( } if (log->l_sectbb_log) { - blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no); - nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks); + blk_no = round_down(blk_no, xlog_sectbb(log)); + nbblks = round_up(nbblks, xlog_sectbb(log)); } ASSERT(nbblks > 0); @@ -196,8 +191,8 @@ xlog_bwrite( } if (log->l_sectbb_log) { - blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no); - nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks); + blk_no = round_down(blk_no, xlog_sectbb(log)); + nbblks = round_up(nbblks, xlog_sectbb(log)); } ASSERT(nbblks > 0); @@ -1158,7 +1153,7 @@ xlog_write_log_records( xfs_caddr_t offset; xfs_buf_t *bp; int balign, ealign; - int sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1); + int sectbb = xlog_sectbb(log); int end_block = start_block + blocks; int bufblks; int error = 0; @@ -1181,7 +1176,7 @@ xlog_write_log_records( * the buffer in the starting sector not covered by the first * write below. */ - balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block); + balign = round_down(start_block, sectbb); if (balign != start_block) { error = xlog_bread_noalign(log, start_block, 1, bp); if (error) @@ -1200,7 +1195,7 @@ xlog_write_log_records( * the buffer in the final sector not covered by the write. * If this is the same sector as the above read, skip it. */ - ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block); + ealign = round_down(end_block, sectbb); if (j == 0 && (start_block + endcount > ealign)) { offset = XFS_BUF_PTR(bp); balign = BBTOB(ealign - start_block); -- cgit From ff30a6221d95b609a37410a425937b11a55d465e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 13 Apr 2010 15:22:58 +1000 Subject: xfs: encapsulate bbcount validity checking Define a function that encapsulates checking the validity of a log block count. (Updated from previous version--no longer includes error reporting in the encapsulated validation function.) 
Signed-off-by: Alex Elder Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner --- fs/xfs/xfs_log_recover.c | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 0e51bdd910a..b5eab63eb12 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -63,15 +63,29 @@ STATIC void xlog_recover_check_summary(xlog_t *); /* Number of basic blocks in a log sector */ #define xlog_sectbb(log) (1 << (log)->l_sectbb_log) +/* + * Verify the given count of basic blocks is valid number of blocks + * to specify for an operation involving the given XFS log buffer. + * Returns nonzero if the count is valid, 0 otherwise. + */ + +static inline int +xlog_buf_bbcount_valid( + xlog_t *log, + int bbcount) +{ + return bbcount > 0 && bbcount <= log->l_logBBsize; +} + STATIC xfs_buf_t * xlog_get_bp( xlog_t *log, int nbblks) { - if (nbblks <= 0 || nbblks > log->l_logBBsize) { - xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks); - XFS_ERROR_REPORT("xlog_get_bp(1)", - XFS_ERRLEVEL_HIGH, log->l_mp); + if (!xlog_buf_bbcount_valid(log, nbblks)) { + xlog_warn("XFS: Invalid block length (0x%x) given for buffer", + nbblks); + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); return NULL; } @@ -121,10 +135,10 @@ xlog_bread_noalign( { int error; - if (nbblks <= 0 || nbblks > log->l_logBBsize) { - xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks); - XFS_ERROR_REPORT("xlog_bread(1)", - XFS_ERRLEVEL_HIGH, log->l_mp); + if (!xlog_buf_bbcount_valid(log, nbblks)) { + xlog_warn("XFS: Invalid block length (0x%x) given for buffer", + nbblks); + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); return EFSCORRUPTED; } @@ -183,10 +197,10 @@ xlog_bwrite( { int error; - if (nbblks <= 0 || nbblks > log->l_logBBsize) { - xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks); - XFS_ERROR_REPORT("xlog_bwrite(1)", - XFS_ERRLEVEL_HIGH, log->l_mp); + if (!xlog_buf_bbcount_valid(log, nbblks)) { + xlog_warn("XFS: Invalid block length (0x%x) given for buffer", + nbblks); + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); return EFSCORRUPTED; } -- cgit From 36adecff50b69df0369cc2022650c6087aeb255f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 13 Apr 2010 15:21:13 +1000 Subject: xfs: nothing special about 1-block log sector There are a number of places where a log sector size of 1 uses special case code. The round_up() and round_down() macros produce the correct result even when the log sector size is 1, and this eliminates the need for treating this as a special case. Signed-off-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log_recover.c | 42 +++++++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 13 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b5eab63eb12..629e88b38bd 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -77,6 +77,11 @@ xlog_buf_bbcount_valid( return bbcount > 0 && bbcount <= log->l_logBBsize; } +/* + * Allocate a buffer to hold log data. The buffer needs to be able + * to map to a range of nbblks basic blocks at any valid (basic + * block) offset within the log. 
+ */ STATIC xfs_buf_t * xlog_get_bp( xlog_t *log, @@ -89,11 +94,26 @@ xlog_get_bp( return NULL; } - if (log->l_sectbb_log) { - if (nbblks > 1) - nbblks += xlog_sectbb(log); - nbblks = round_up(nbblks, xlog_sectbb(log)); - } + /* + * We do log I/O in units of log sectors (a power-of-2 + * multiple of the basic block size), so we round up the + * requested size to acommodate the basic blocks required + * for complete log sectors. + * + * In addition, the buffer may be used for a non-sector- + * aligned block offset, in which case an I/O of the + * requested size could extend beyond the end of the + * buffer. If the requested size is only 1 basic block it + * will never straddle a sector boundary, so this won't be + * an issue. Nor will this be a problem if the log I/O is + * done in basic blocks (sector size 1). But otherwise we + * extend the buffer by one extra log sector to ensure + * there's space to accomodate this possiblility. + */ + if (nbblks > 1 && log->l_sectbb_log) + nbblks += xlog_sectbb(log); + nbblks = round_up(nbblks, xlog_sectbb(log)); + return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp); } @@ -142,10 +162,8 @@ xlog_bread_noalign( return EFSCORRUPTED; } - if (log->l_sectbb_log) { - blk_no = round_down(blk_no, xlog_sectbb(log)); - nbblks = round_up(nbblks, xlog_sectbb(log)); - } + blk_no = round_down(blk_no, xlog_sectbb(log)); + nbblks = round_up(nbblks, xlog_sectbb(log)); ASSERT(nbblks > 0); ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); @@ -204,10 +222,8 @@ xlog_bwrite( return EFSCORRUPTED; } - if (log->l_sectbb_log) { - blk_no = round_down(blk_no, xlog_sectbb(log)); - nbblks = round_up(nbblks, xlog_sectbb(log)); - } + blk_no = round_down(blk_no, xlog_sectbb(log)); + nbblks = round_up(nbblks, xlog_sectbb(log)); ASSERT(nbblks > 0); ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); -- cgit From 9db127edb54048707eb84517eb0573e597a2370a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 15 Apr 2010 18:17:26 +0000 Subject: xfs: change a few labels in xfs_log_recover.c Rename a label used in xlog_find_head() that I thought was poorly chosen. Also combine two adjacent labels xlog_find_tail() into a single label, and give it a more generic name. (Now using Dave's suggested "validate_head" name for first label.) Signed-off-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log_recover.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 629e88b38bd..a6cbc140c33 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -739,7 +739,7 @@ xlog_find_head( goto bp_err; if (new_blk != -1) { head_blk = new_blk; - goto bad_blk; + goto validate_head; } /* @@ -757,7 +757,7 @@ xlog_find_head( head_blk = new_blk; } - bad_blk: +validate_head: /* * Now we need to make sure head_blk is not pointing to a block in * the middle of a log record. 
@@ -864,12 +864,12 @@ xlog_find_tail( if (*head_blk == 0) { /* special case */ error = xlog_bread(log, 0, 1, bp, &offset); if (error) - goto bread_err; + goto done; if (xlog_get_cycle(offset) == 0) { *tail_blk = 0; /* leave all other log inited values alone */ - goto exit; + goto done; } } @@ -880,7 +880,7 @@ xlog_find_tail( for (i = (int)(*head_blk) - 1; i >= 0; i--) { error = xlog_bread(log, i, 1, bp, &offset); if (error) - goto bread_err; + goto done; if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) { found = 1; @@ -897,7 +897,7 @@ xlog_find_tail( for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) { error = xlog_bread(log, i, 1, bp, &offset); if (error) - goto bread_err; + goto done; if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) { @@ -972,7 +972,7 @@ xlog_find_tail( umount_data_blk = (i + hblks) % log->l_logBBsize; error = xlog_bread(log, umount_data_blk, 1, bp, &offset); if (error) - goto bread_err; + goto done; op_head = (xlog_op_header_t *)offset; if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { @@ -1018,12 +1018,10 @@ xlog_find_tail( * But... if the -device- itself is readonly, just skip this. * We can't recover this device anyway, so it won't matter. */ - if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) { + if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) error = xlog_clear_stale_blocks(log, tail_lsn); - } -bread_err: -exit: +done: xlog_put_bp(bp); if (error) -- cgit From e3bb2e30d532b00a9bdda997e174a9f9916cb1c0 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 15 Apr 2010 18:17:30 +0000 Subject: xfs: avoid repeated pointer dereferences In xlog_find_cycle_start() use a local variable for some repeated operations rather than constantly accessing the memory location whose address is passed in. (This version drops an assertion that a pointer is non-null.) 
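The underlying pattern is generic: hoist the value behind a pointer parameter into a local, work on the local, and write the result back once. A hypothetical before/after sketch follows (the function and variable names are made up for illustration; the real change to xlog_find_cycle_start() is in the diff below):

#include <assert.h>

/* Before: every loop iteration reads and writes through the out-parameter. */
static void halve_until_odd_indirect(unsigned int *val)
{
	while (*val % 2 == 0)
		*val /= 2;
}

/* After: work on a local copy and store the result back exactly once. */
static void halve_until_odd_local(unsigned int *val)
{
	unsigned int v = *val;

	while (v % 2 == 0)
		v /= 2;
	*val = v;
}

int main(void)
{
	unsigned int a = 48, b = 48;

	halve_until_odd_indirect(&a);
	halve_until_odd_local(&b);
	assert(a == 3 && b == 3);
	return 0;
}

Besides being easier to read, the second form only updates the caller's value once, on the way out, which mirrors how *last_blk is now assigned at the end of xlog_find_cycle_start().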
Signed-off-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log_recover.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index a6cbc140c33..7b3375db672 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -354,26 +354,27 @@ xlog_find_cycle_start( { xfs_caddr_t offset; xfs_daddr_t mid_blk; + xfs_daddr_t end_blk; uint mid_cycle; int error; - mid_blk = BLK_AVG(first_blk, *last_blk); - while (mid_blk != first_blk && mid_blk != *last_blk) { + end_blk = *last_blk; + mid_blk = BLK_AVG(first_blk, end_blk); + while (mid_blk != first_blk && mid_blk != end_blk) { error = xlog_bread(log, mid_blk, 1, bp, &offset); if (error) return error; mid_cycle = xlog_get_cycle(offset); - if (mid_cycle == cycle) { - *last_blk = mid_blk; - /* last_half_cycle == mid_cycle */ - } else { - first_blk = mid_blk; - /* first_half_cycle == mid_cycle */ - } - mid_blk = BLK_AVG(first_blk, *last_blk); + if (mid_cycle == cycle) + end_blk = mid_blk; /* last_half_cycle == mid_cycle */ + else + first_blk = mid_blk; /* first_half_cycle == mid_cycle */ + mid_blk = BLK_AVG(first_blk, end_blk); } - ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) || - (mid_blk == *last_blk && mid_blk-1 == first_blk)); + ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) || + (mid_blk == end_blk && mid_blk-1 == first_blk)); + + *last_blk = end_blk; return 0; } -- cgit From 3f943d853d6ce6d808e7362e4444c7ed5f692357 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 15 Apr 2010 18:17:34 +0000 Subject: xfs: minor odds and ends in xfs_log_recover.c Odds and ends in "xfs_log_recover.c". This patch just contains some minor things that didn't seem to warrant their own individual patches: - In xlog_bread_noalign(), drop an assertion that a pointer is non-null (the crash will tell us it was a bad pointer). - Add a more descriptive header comment for xlog_find_verify_cycle(). - Make a few additions to the comments in xlog_find_head(). Also rearrange some expressions in a few spots to produce the same result, but in a way that seems more clear what's being computed. (Updated in response to Dave's review comments. Note I did not split this patch like I said I would.) Signed-off-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log_recover.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 7b3375db672..3bfff4220a7 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -167,7 +167,6 @@ xlog_bread_noalign( ASSERT(nbblks > 0); ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); - ASSERT(bp); XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); XFS_BUF_READ(bp); @@ -380,14 +379,12 @@ xlog_find_cycle_start( } /* - * Check that the range of blocks does not contain the cycle number - * given. The scan needs to occur from front to back and the ptr into the - * region must be updated since a later routine will need to perform another - * test. If the region is completely good, we end up returning the same - * last block number. - * - * Set blkno to -1 if we encounter no errors. This is an invalid block number - * since we don't ever expect logs to get this large. + * Check that a range of blocks does not contain stop_on_cycle_no. 
+ * Fill in *new_blk with the block offset where such a block is + * found, or with -1 (an invalid block number) if there is no such + * block in the range. The scan needs to occur from front to back + * and the pointer into the region must be updated since a later + * routine will need to perform another test. */ STATIC int xlog_find_verify_cycle( @@ -661,7 +658,7 @@ xlog_find_head( * In this case we want to find the first block with cycle * number matching last_half_cycle. We expect the log to be * some variation on - * x + 1 ... | x ... + * x + 1 ... | x ... | x * The first block with cycle number x (last_half_cycle) will * be where the new head belongs. First we do a binary search * for the first occurrence of last_half_cycle. The binary @@ -671,11 +668,13 @@ xlog_find_head( * the log, then we look for occurrences of last_half_cycle - 1 * at the end of the log. The cases we're looking for look * like - * x + 1 ... | x | x + 1 | x ... - * ^ binary search stopped here + * v binary search stopped here + * x + 1 ... | x | x + 1 | x ... | x + * ^ but we want to locate this spot * or - * x + 1 ... | x ... | x - 1 | x * <---------> less than scan distance + * x + 1 ... | x ... | x - 1 | x + * ^ we want to locate this spot */ stop_on_cycle = last_half_cycle; if ((error = xlog_find_cycle_start(log, bp, first_blk, @@ -731,9 +730,9 @@ xlog_find_head( * certainly not the head of the log. By searching for * last_half_cycle-1 we accomplish that. */ - start_blk = log_bbnum - num_scan_bblks + head_blk; ASSERT(head_blk <= INT_MAX && - (xfs_daddr_t) num_scan_bblks - head_blk >= 0); + (xfs_daddr_t) num_scan_bblks >= head_blk); + start_blk = log_bbnum - (num_scan_bblks - head_blk); if ((error = xlog_find_verify_cycle(log, start_blk, num_scan_bblks - (int)head_blk, (stop_on_cycle - 1), &new_blk))) @@ -780,7 +779,7 @@ validate_head: if ((error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0)) == -1) { /* We hit the beginning of the log during our search */ - start_blk = log_bbnum - num_scan_bblks + head_blk; + start_blk = log_bbnum - (num_scan_bblks - head_blk); new_blk = log_bbnum; ASSERT(start_blk <= INT_MAX && (xfs_daddr_t) log_bbnum-start_blk >= 0); -- cgit From 1414a6046ab402ac21545522270c32c576327eb9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 20 Apr 2010 17:02:50 +1000 Subject: xfs: remove dead XFS_LOUD_RECOVERY code This can't be enabled through the build system and has been dead for ages. Note that the CRC patches add back log checksumming, but the code is quite different from the version removed here anyway. 
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner --- fs/xfs/xfs_log_recover.c | 67 ------------------------------------------------ 1 file changed, 67 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 3bfff4220a7..e5b74db5d2e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3394,42 +3394,6 @@ xlog_pack_data( } } -#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) -STATIC void -xlog_unpack_data_checksum( - xlog_rec_header_t *rhead, - xfs_caddr_t dp, - xlog_t *log) -{ - __be32 *up = (__be32 *)dp; - uint chksum = 0; - int i; - - /* divide length by 4 to get # words */ - for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) { - chksum ^= be32_to_cpu(*up); - up++; - } - if (chksum != be32_to_cpu(rhead->h_chksum)) { - if (rhead->h_chksum || - ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) { - cmn_err(CE_DEBUG, - "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n", - be32_to_cpu(rhead->h_chksum), chksum); - cmn_err(CE_DEBUG, -"XFS: Disregard message if filesystem was created with non-DEBUG kernel"); - if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { - cmn_err(CE_DEBUG, - "XFS: LogR this is a LogV2 filesystem\n"); - } - log->l_flags |= XLOG_CHKSUM_MISMATCH; - } - } -} -#else -#define xlog_unpack_data_checksum(rhead, dp, log) -#endif - STATIC void xlog_unpack_data( xlog_rec_header_t *rhead, @@ -3453,8 +3417,6 @@ xlog_unpack_data( dp += BBSIZE; } } - - xlog_unpack_data_checksum(rhead, dp, log); } STATIC int @@ -4009,10 +3971,6 @@ xlog_recover_check_summary( xfs_agf_t *agfp; xfs_buf_t *agfbp; xfs_buf_t *agibp; - xfs_buf_t *sbbp; -#ifdef XFS_LOUD_RECOVERY - xfs_sb_t *sbp; -#endif xfs_agnumber_t agno; __uint64_t freeblks; __uint64_t itotal; @@ -4047,30 +4005,5 @@ xlog_recover_check_summary( xfs_buf_relse(agibp); } } - - sbbp = xfs_getsb(mp, 0); -#ifdef XFS_LOUD_RECOVERY - sbp = &mp->m_sb; - xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp)); - cmn_err(CE_NOTE, - "xlog_recover_check_summary: sb_icount %Lu itotal %Lu", - sbp->sb_icount, itotal); - cmn_err(CE_NOTE, - "xlog_recover_check_summary: sb_ifree %Lu itotal %Lu", - sbp->sb_ifree, ifree); - cmn_err(CE_NOTE, - "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu", - sbp->sb_fdblocks, freeblks); -#if 0 - /* - * This is turned off until I account for the allocation - * btree blocks which live in free space. - */ - ASSERT(sbp->sb_icount == itotal); - ASSERT(sbp->sb_ifree == ifree); - ASSERT(sbp->sb_fdblocks == freeblks); -#endif -#endif - xfs_buf_relse(sbbp); } #endif /* DEBUG */ -- cgit From 69ce58f08a3c455ff74cfcde90e9ab267d67f636 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 20 Apr 2010 17:09:59 +1000 Subject: xfs: record log sector size rather than log2(that) Change struct log so it keeps track of the size (in basic blocks) of a log sector in l_sectBBsize rather than the log-base-2 of that value (previously, l_sectbb_log). The name was chosen for consistency with the other fields in the structure that represent a number of basic blocks. (Updated so that a variable used in computing and verifying a log's sector size is named "log2_size". Also added the "BB" to the structure field name, based on feedback from Eric Sandeen. Also dropped some superfluous parentheses.) 
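As an assumed, concrete example of the two representations (the numbers are illustrative, not taken from the patch): with 512-byte basic blocks and a 4096-byte log sector there are eight basic blocks per sector, so the old field held log2(8) = 3 while the new field holds 8 directly:

#include <assert.h>
#include <strings.h>

int main(void)
{
	int l_sectbb_log = 3;			/* old field: log2(basic blocks per sector) */
	int l_sectBBsize = 1 << l_sectbb_log;	/* new field: basic blocks per sector */

	/* The two forms are interchangeable, so no information is lost. */
	assert(l_sectBBsize == 8);
	assert(ffs(l_sectBBsize) - 1 == l_sectbb_log);
	return 0;
}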
Signed-off-by: Alex Elder Reviewed-by: Eric Sandeen --- fs/xfs/xfs_log_recover.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index e5b74db5d2e..f1220ec1896 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -60,9 +60,6 @@ STATIC void xlog_recover_check_summary(xlog_t *); * Sector aligned buffer routines for buffer create/read/write/access */ -/* Number of basic blocks in a log sector */ -#define xlog_sectbb(log) (1 << (log)->l_sectbb_log) - /* * Verify the given count of basic blocks is valid number of blocks * to specify for an operation involving the given XFS log buffer. @@ -110,9 +107,9 @@ xlog_get_bp( * extend the buffer by one extra log sector to ensure * there's space to accomodate this possiblility. */ - if (nbblks > 1 && log->l_sectbb_log) - nbblks += xlog_sectbb(log); - nbblks = round_up(nbblks, xlog_sectbb(log)); + if (nbblks > 1 && log->l_sectBBsize > 1) + nbblks += log->l_sectBBsize; + nbblks = round_up(nbblks, log->l_sectBBsize); return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp); } @@ -133,7 +130,7 @@ xlog_align( { xfs_caddr_t ptr; - if (!log->l_sectbb_log) + if (log->l_sectBBsize == 1) return XFS_BUF_PTR(bp); ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask); @@ -162,8 +159,8 @@ xlog_bread_noalign( return EFSCORRUPTED; } - blk_no = round_down(blk_no, xlog_sectbb(log)); - nbblks = round_up(nbblks, xlog_sectbb(log)); + blk_no = round_down(blk_no, log->l_sectBBsize); + nbblks = round_up(nbblks, log->l_sectBBsize); ASSERT(nbblks > 0); ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); @@ -221,8 +218,8 @@ xlog_bwrite( return EFSCORRUPTED; } - blk_no = round_down(blk_no, xlog_sectbb(log)); - nbblks = round_up(nbblks, xlog_sectbb(log)); + blk_no = round_down(blk_no, log->l_sectBBsize); + nbblks = round_up(nbblks, log->l_sectBBsize); ASSERT(nbblks > 0); ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp)); @@ -410,7 +407,7 @@ xlog_find_verify_cycle( bufblks = 1 << ffs(nbblks); while (!(bp = xlog_get_bp(log, bufblks))) { bufblks >>= 1; - if (bufblks < xlog_sectbb(log)) + if (bufblks < log->l_sectBBsize) return ENOMEM; } @@ -1181,7 +1178,7 @@ xlog_write_log_records( xfs_caddr_t offset; xfs_buf_t *bp; int balign, ealign; - int sectbb = xlog_sectbb(log); + int sectbb = log->l_sectBBsize; int end_block = start_block + blocks; int bufblks; int error = 0; @@ -1196,7 +1193,7 @@ xlog_write_log_records( bufblks = 1 << ffs(blocks); while (!(bp = xlog_get_bp(log, bufblks))) { bufblks >>= 1; - if (bufblks < xlog_sectbb(log)) + if (bufblks < sectbb) return ENOMEM; } @@ -3515,7 +3512,7 @@ xlog_do_recovery_pass( hblks = 1; } } else { - ASSERT(log->l_sectbb_log == 0); + ASSERT(log->l_sectBBsize == 1); hblks = 1; hbp = xlog_get_bp(log, 1); h_size = XLOG_BIG_RECORD_BSIZE; -- cgit From 48389ef17583f2214bbd2c119b3015677419c16b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 20 Apr 2010 17:10:21 +1000 Subject: xfs: kill off l_sectbb_mask There remains only one user of the l_sectbb_mask field in the log structure. Just kill it off and compute the mask where needed from the power-of-2 sector size. (Only update from last post is to accomodate the changes in the previous patch in the series.) 
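With a power-of-two sector size the mask is simply the size minus one, which is why the field can go away. A small sketch with assumed numbers (eight basic blocks per sector and a request for basic block 37; neither value comes from the patch):

#include <assert.h>

int main(void)
{
	int l_sectBBsize = 8;		/* assumed: basic blocks per log sector */
	int blk_no = 37;		/* assumed: basic block being looked up */
	int mask = l_sectBBsize - 1;	/* what l_sectbb_mask used to store */

	/*
	 * A sector-aligned buffer holding block 37 starts at block 32,
	 * and block 37's data sits 5 basic blocks into that buffer.
	 */
	assert((blk_no & ~mask) == 32);	/* round_down(37, 8) */
	assert((blk_no & mask) == 5);	/* in-buffer offset used by xlog_align() */
	return 0;
}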
Signed-off-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log_recover.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'fs/xfs/xfs_log_recover.c') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index f1220ec1896..0de08e36631 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -121,6 +121,10 @@ xlog_put_bp( xfs_buf_free(bp); } +/* + * Return the address of the start of the given block number's data + * in a log buffer. The buffer covers a log sector-aligned region. + */ STATIC xfs_caddr_t xlog_align( xlog_t *log, @@ -128,14 +132,14 @@ xlog_align( int nbblks, xfs_buf_t *bp) { + xfs_daddr_t offset; xfs_caddr_t ptr; - if (log->l_sectBBsize == 1) - return XFS_BUF_PTR(bp); + offset = blk_no & ((xfs_daddr_t) log->l_sectBBsize - 1); + ptr = XFS_BUF_PTR(bp) + BBTOB(offset); + + ASSERT(ptr + BBTOB(nbblks) <= XFS_BUF_PTR(bp) + XFS_BUF_SIZE(bp)); - ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask); - ASSERT(XFS_BUF_SIZE(bp) >= - BBTOB(nbblks + (blk_no & log->l_sectbb_mask))); return ptr; } -- cgit