
Commit 7d0795d

Matthew Wilcox (Oracle) authored and akpm00 committed
mm: make __end_folio_writeback() return void
Rather than check the result of test-and-clear, just check that we have the
writeback bit set at the start.  This wouldn't catch every case, but it's
good enough (and enables the next patch).

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Cc: Albert Ou <[email protected]>
Cc: Alexander Gordeev <[email protected]>
Cc: Andreas Dilger <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Ivan Kokshaysky <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Palmer Dabbelt <[email protected]>
Cc: Paul Walmsley <[email protected]>
Cc: Richard Henderson <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: "Theodore Ts'o" <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
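In caller terms the change is small but worth spelling out. The sketch below is condensed from the diff that follows and is illustrative only: the helper names end_writeback_before()/end_writeback_after() are invented for contrast, and the reclaim handling plus the tail of folio_end_writeback() are omitted. The error check moves from the return value of the test-and-clear to a single assertion at entry.

/* Condensed, illustrative sketch -- not the complete kernel functions. */

/* Before: the caller had to act on the bool returned by __folio_end_writeback(). */
static void end_writeback_before(struct folio *folio)
{
        folio_get(folio);
        if (!__folio_end_writeback(folio))      /* test-and-clear failed? */
                BUG();
        smp_mb__after_atomic();
        folio_wake(folio, PG_writeback);
        /* ... rest of the function unchanged ... */
}

/* After: assert the precondition once at entry; the clear is unconditional. */
static void end_writeback_after(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);

        folio_get(folio);
        __folio_end_writeback(folio);           /* now returns void */
        smp_mb__after_atomic();
        folio_wake(folio, PG_writeback);
        /* ... rest of the function unchanged ... */
}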
1 parent 0410cd8 · commit 7d0795d

3 files changed, +24 −25 lines


mm/filemap.c

Lines changed: 7 additions & 2 deletions
@@ -1593,9 +1593,15 @@ EXPORT_SYMBOL(folio_wait_private_2_killable);
 /**
  * folio_end_writeback - End writeback against a folio.
  * @folio: The folio.
+ *
+ * The folio must actually be under writeback.
+ *
+ * Context: May be called from process or interrupt context.
  */
 void folio_end_writeback(struct folio *folio)
 {
+        VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
+
         /*
          * folio_test_clear_reclaim() could be used here but it is an
          * atomic operation and overkill in this particular case. Failing
@@ -1615,8 +1621,7 @@ void folio_end_writeback(struct folio *folio)
          * reused before the folio_wake().
          */
         folio_get(folio);
-        if (!__folio_end_writeback(folio))
-                BUG();
+        __folio_end_writeback(folio);
 
         smp_mb__after_atomic();
         folio_wake(folio, PG_writeback);
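The new kernel-doc ("Context: May be called from process or interrupt context") states the contract that makes the assertion reasonable: by the time writeback is ended, the folio really is under writeback. A hypothetical write-completion handler is sketched below; my_write_endio() and its bio are illustrative assumptions, not part of this commit.

#include <linux/bio.h>
#include <linux/pagemap.h>

/*
 * Hypothetical write-completion handler, possibly running in interrupt
 * context.  Each folio attached to the bio was marked under writeback
 * when the I/O was submitted, so folio_end_writeback() can assert that
 * and clear the bit unconditionally.
 */
static void my_write_endio(struct bio *bio)
{
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio)
                folio_end_writeback(fi.folio);
        bio_put(bio);
}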

mm/internal.h

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
-bool __folio_end_writeback(struct folio *folio);
+void __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
 
mm/page-writeback.c

Lines changed: 16 additions & 22 deletions
@@ -2940,11 +2940,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
         spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
-bool __folio_end_writeback(struct folio *folio)
+void __folio_end_writeback(struct folio *folio)
 {
         long nr = folio_nr_pages(folio);
         struct address_space *mapping = folio_mapping(folio);
-        bool ret;
 
         folio_memcg_lock(folio);
         if (mapping && mapping_use_writeback_tags(mapping)) {
@@ -2953,19 +2952,16 @@ bool __folio_end_writeback(struct folio *folio)
                 unsigned long flags;
 
                 xa_lock_irqsave(&mapping->i_pages, flags);
-                ret = folio_test_clear_writeback(folio);
-                if (ret) {
-                        __xa_clear_mark(&mapping->i_pages, folio_index(folio),
-                                                PAGECACHE_TAG_WRITEBACK);
-                        if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-                                struct bdi_writeback *wb = inode_to_wb(inode);
-
-                                wb_stat_mod(wb, WB_WRITEBACK, -nr);
-                                __wb_writeout_add(wb, nr);
-                                if (!mapping_tagged(mapping,
-                                                        PAGECACHE_TAG_WRITEBACK))
-                                        wb_inode_writeback_end(wb);
-                        }
+                folio_test_clear_writeback(folio);
+                __xa_clear_mark(&mapping->i_pages, folio_index(folio),
+                                        PAGECACHE_TAG_WRITEBACK);
+                if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+                        struct bdi_writeback *wb = inode_to_wb(inode);
+
+                        wb_stat_mod(wb, WB_WRITEBACK, -nr);
+                        __wb_writeout_add(wb, nr);
+                        if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+                                wb_inode_writeback_end(wb);
                 }
 
                 if (mapping->host && !mapping_tagged(mapping,
@@ -2974,15 +2970,13 @@ bool __folio_end_writeback(struct folio *folio)
 
                 xa_unlock_irqrestore(&mapping->i_pages, flags);
         } else {
-                ret = folio_test_clear_writeback(folio);
-        }
-        if (ret) {
-                lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
-                zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
-                node_stat_mod_folio(folio, NR_WRITTEN, nr);
+                folio_test_clear_writeback(folio);
         }
+
+        lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+        zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+        node_stat_mod_folio(folio, NR_WRITTEN, nr);
         folio_memcg_unlock(folio);
-        return ret;
 }
 
 bool __folio_start_writeback(struct folio *folio, bool keep_write)
