@@ -1332,7 +1332,6 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
 
 static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
 				  struct dirty_throttle_control *mdtc,
-				  unsigned long start_time,
 				  bool update_ratelimit)
 {
 	struct bdi_writeback *wb = gdtc->wb;
@@ -1352,13 +1351,6 @@ static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
 	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
 	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
 
-	/*
-	 * Skip quiet periods when disk bandwidth is under-utilized.
-	 * (at least 1s idle time between two flusher runs)
-	 */
-	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
-		goto snapshot;
-
 	if (update_ratelimit) {
 		domain_update_bandwidth(gdtc, now);
 		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
@@ -1374,17 +1366,36 @@ static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
 	}
 	wb_update_write_bandwidth(wb, elapsed, written);
 
-snapshot:
 	wb->dirtied_stamp = dirtied;
 	wb->written_stamp = written;
 	wb->bw_time_stamp = now;
 }
 
-void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
+static void wb_update_bandwidth(struct bdi_writeback *wb)
 {
 	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
 
-	__wb_update_bandwidth(&gdtc, NULL, start_time, false);
+	spin_lock(&wb->list_lock);
+	__wb_update_bandwidth(&gdtc, NULL, false);
+	spin_unlock(&wb->list_lock);
+}
+
+/* Interval after which we consider wb idle and don't estimate bandwidth */
+#define WB_BANDWIDTH_IDLE_JIF (HZ)
+
+static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
+{
+	unsigned long now = jiffies;
+	unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
+
+	if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
+	    !atomic_read(&wb->writeback_inodes)) {
+		spin_lock(&wb->list_lock);
+		wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
+		wb->written_stamp = wb_stat(wb, WB_WRITTEN);
+		wb->bw_time_stamp = now;
+		spin_unlock(&wb->list_lock);
+	}
 }
 
 /*
@@ -1713,7 +1724,7 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
 		if (time_is_before_jiffies(wb->bw_time_stamp +
 					   BANDWIDTH_INTERVAL)) {
 			spin_lock(&wb->list_lock);
-			__wb_update_bandwidth(gdtc, mdtc, start_time, true);
+			__wb_update_bandwidth(gdtc, mdtc, true);
 			spin_unlock(&wb->list_lock);
 		}
 
@@ -2347,9 +2358,12 @@ EXPORT_SYMBOL(generic_writepages);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
 	int ret;
+	struct bdi_writeback *wb;
 
 	if (wbc->nr_to_write <= 0)
 		return 0;
+	wb = inode_to_wb_wbc(mapping->host, wbc);
+	wb_bandwidth_estimate_start(wb);
 	while (1) {
 		if (mapping->a_ops->writepages)
 			ret = mapping->a_ops->writepages(mapping, wbc);
@@ -2360,6 +2374,7 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 		cond_resched();
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 	}
+	wb_update_bandwidth(wb);
 	return ret;
 }
 
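Illustration only, not part of the patch: a minimal user-space C sketch of the idea behind wb_bandwidth_estimate_start() and wb_update_bandwidth() as added above. If the device has been idle longer than the threshold, the stamps are resynced before writeback starts, so the idle gap is not counted as elapsed time and does not drag the write-bandwidth estimate down. The names estimate_start(), update_bandwidth(), the HZ value, and the 3/4 smoothing factor are invented for the example; the real fields live in struct bdi_writeback and are updated under wb->list_lock.

/* Standalone sketch of the idle-window reset used by the bandwidth estimate. */
#include <stdio.h>

#define HZ			100		/* assumed ticks per second */
#define WB_BANDWIDTH_IDLE_JIF	(HZ)		/* idle threshold, as in the patch */

static unsigned long bw_time_stamp;		/* time of last estimation */
static unsigned long written_stamp;		/* pages written at last estimation */
static unsigned long write_bandwidth = 100;	/* running estimate, pages/s */

/* Roughly wb_bandwidth_estimate_start(): if the wb was idle for more than
 * WB_BANDWIDTH_IDLE_JIF, resync the stamps so the idle time is dropped. */
static void estimate_start(unsigned long now, unsigned long written)
{
	if (now - bw_time_stamp > WB_BANDWIDTH_IDLE_JIF) {
		written_stamp = written;
		bw_time_stamp = now;
	}
}

/* Roughly wb_update_write_bandwidth(): fold the pages written since the
 * last stamp into the running estimate with simple smoothing. */
static void update_bandwidth(unsigned long now, unsigned long written)
{
	unsigned long elapsed = now - bw_time_stamp;
	unsigned long bw;

	if (!elapsed)
		return;
	bw = (written - written_stamp) * HZ / elapsed;
	write_bandwidth = (write_bandwidth * 3 + bw) / 4;
	written_stamp = written;
	bw_time_stamp = now;
}

int main(void)
{
	unsigned long now = 0, written = 0;

	now = 10 * HZ;			/* 10 s of idle before the burst */
	estimate_start(now, written);	/* stamps resync, idle time dropped */
	written += 500;			/* 500 pages written ... */
	now += HZ / 2;			/* ... in half a second */
	update_bandwidth(now, written);

	printf("estimated bandwidth: %lu pages/s\n", write_bandwidth);
	return 0;
}

Without the estimate_start() reset, elapsed would include the 10 s idle gap and the measured rate would collapse to roughly 47 pages/s instead of about 1000 pages/s, which is the spiky-workload underestimate this change addresses.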