Skip to content

Commit

Permalink
btrfs: subpage: dump the involved bitmap when ASSERT() failed
Browse files Browse the repository at this point in the history
For btrfs_folio_assert_not_dirty() and btrfs_folio_set_lock(), we call
bitmap_test_range_all_zero() to ensure the involved range has no bits
set.

However, with my recent enhanced delalloc range error handling, I'm
hitting the ASSERT() inside btrfs_folio_set_lock(), and am wondering if
some error handling path is not properly cleaning up the locked bitmap
but directly unlocking the page.

So add some extra dumping to the ASSERT()s to print the involved bitmap
to help debug.

Signed-off-by: Qu Wenruo <[email protected]>
  • Loading branch information
adam900710 committed Dec 12, 2024
1 parent 7533f83 commit 498c745
Showing 1 changed file with 30 additions and 11 deletions.
41 changes: 30 additions & 11 deletions fs/btrfs/subpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -635,6 +635,28 @@ IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
folio_test_checked);

/*
 * Extract the @name sub-bitmap of @subpage into *@dst.
 *
 * The per-type bitmaps are packed back to back inside subpage->bitmaps,
 * each fs_info->sectors_per_page bits wide; btrfs_bitmap_nr_##name is the
 * index of the wanted one.  The ASSERT() guarantees the whole sub-bitmap
 * fits into the single unsigned long pointed to by @dst.
 *
 * Wrapped in do/while(0) so the multi-statement body expands safely in
 * un-braced if/else contexts.
 */
#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst)			\
do {									\
	const int sectors_per_page = (fs_info)->sectors_per_page;	\
									\
	ASSERT(sectors_per_page < BITS_PER_LONG);			\
	*(dst) = bitmap_read((subpage)->bitmaps,			\
			     sectors_per_page * btrfs_bitmap_nr_##name,	\
			     sectors_per_page);				\
} while (0)

/*
 * Dump the @name sub-bitmap of @folio covering the range
 * [@start, @start + @len) via btrfs_warn().
 *
 * Meant to be called right before a bitmap ASSERT() fires, so the failure
 * report carries the actual bitmap content for debugging.
 *
 * Fixes over the previous version:
 * - Add the missing space before #name in the format string; without it
 *   the output ran together as e.g. "folio=123dirty_bitmap=...".
 * - Fix the "dumpping" typo in the message.
 * - Use do/while(0) so the multi-statement body is safe in un-braced
 *   if/else contexts.
 */
#define subpage_dump_bitmap(fs_info, folio, name, start, len)		\
do {									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned long bitmap;						\
									\
	GET_SUBPAGE_BITMAP(subpage, fs_info, name, &bitmap);		\
	btrfs_warn(fs_info,						\
	"dumping bitmap start=%llu len=%u folio=%llu " #name "_bitmap=%*pbl", \
		   start, len, folio_pos(folio),			\
		   (fs_info)->sectors_per_page, &bitmap);		\
} while (0)

/*
* Make sure not only the page dirty bit is cleared, but also subpage dirty bit
* is cleared.
Expand All @@ -660,6 +682,10 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
subpage = folio_get_private(folio);
ASSERT(subpage);
spin_lock_irqsave(&subpage->lock, flags);
if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) {
subpage_dump_bitmap(fs_info, folio, dirty, start, len);
ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
}
ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
spin_unlock_irqrestore(&subpage->lock, flags);
}
Expand Down Expand Up @@ -689,23 +715,16 @@ void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
nbits = len >> fs_info->sectorsize_bits;
spin_lock_irqsave(&subpage->lock, flags);
/* Target range should not yet be locked. */
ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
if (unlikely(!bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits))) {
subpage_dump_bitmap(fs_info, folio, locked, start, len);
ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
}
bitmap_set(subpage->bitmaps, start_bit, nbits);
ret = atomic_add_return(nbits, &subpage->nr_locked);
ASSERT(ret <= fs_info->sectors_per_page);
spin_unlock_irqrestore(&subpage->lock, flags);
}

#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \
{ \
const int sectors_per_page = fs_info->sectors_per_page; \
\
ASSERT(sectors_per_page < BITS_PER_LONG); \
*dst = bitmap_read(subpage->bitmaps, \
sectors_per_page * btrfs_bitmap_nr_##name, \
sectors_per_page); \
}

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
struct folio *folio, u64 start, u32 len)
{
Expand Down

0 comments on commit 498c745

Please sign in to comment.