
Commit 57c67df

md/raid10: submit IO from originating thread instead of md thread.
Queuing writes to the md thread means that all requests go through the one processor, which may not be able to keep up with very high request rates.

So use the plugging infrastructure to submit all requests on unplug. If a 'schedule' is needed, we fall back on the old approach of handing the requests to the thread for it to handle.

This is nearly identical to a recent patch which provided similar functionality to RAID1.

Signed-off-by: NeilBrown <[email protected]>
1 parent 532a2a3 commit 57c67df


drivers/md/raid10.c

Lines changed: 54 additions & 3 deletions
@@ -1055,6 +1055,44 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
 	return rdev->new_data_offset;
 }
 
+struct raid10_plug_cb {
+	struct blk_plug_cb	cb;
+	struct bio_list		pending;
+	int			pending_cnt;
+};
+
+static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
+						   cb);
+	struct mddev *mddev = plug->cb.data;
+	struct r10conf *conf = mddev->private;
+	struct bio *bio;
+
+	if (from_schedule) {
+		spin_lock_irq(&conf->device_lock);
+		bio_list_merge(&conf->pending_bio_list, &plug->pending);
+		conf->pending_count += plug->pending_cnt;
+		spin_unlock_irq(&conf->device_lock);
+		md_wakeup_thread(mddev->thread);
+		kfree(plug);
+		return;
+	}
+
+	/* we aren't scheduling, so we can do the write-out directly. */
+	bio = bio_list_get(&plug->pending);
+	bitmap_unplug(mddev->bitmap);
+	wake_up(&conf->wait_barrier);
+
+	while (bio) { /* submit pending writes */
+		struct bio *next = bio->bi_next;
+		bio->bi_next = NULL;
+		generic_make_request(bio);
+		bio = next;
+	}
+	kfree(plug);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
 	struct r10conf *conf = mddev->private;
@@ -1070,6 +1108,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 					  & (REQ_DISCARD | REQ_SECURE));
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
+	struct blk_plug_cb *cb;
+	struct raid10_plug_cb *plug = NULL;
 	int sectors_handled;
 	int max_sectors;
 	int sectors;
@@ -1421,11 +1461,22 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 		mbio->bi_private = r10_bio;
 
 		atomic_inc(&r10_bio->remaining);
+
+		cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+		if (cb)
+			plug = container_of(cb, struct raid10_plug_cb, cb);
+		else
+			plug = NULL;
 		spin_lock_irqsave(&conf->device_lock, flags);
-		bio_list_add(&conf->pending_bio_list, mbio);
-		conf->pending_count++;
+		if (plug) {
+			bio_list_add(&plug->pending, mbio);
+			plug->pending_cnt++;
+		} else {
+			bio_list_add(&conf->pending_bio_list, mbio);
+			conf->pending_count++;
+		}
 		spin_unlock_irqrestore(&conf->device_lock, flags);
-		if (!mddev_check_plugged(mddev))
+		if (!plug)
 			md_wakeup_thread(mddev->thread);
 
 		if (!r10_bio->devs[i].repl_bio)
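For context, the hook used above is the generic block-layer plugging callback: blk_check_plugged() attaches (or finds) a per-task callback on the current blk_plug, and that callback runs when the plug is flushed. The following is only a minimal sketch of that pattern against the 3.6-era API; the my_* names and the void *dev argument are hypothetical and not part of this commit.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

/* Hypothetical per-plug batching, illustrating the blk_check_plugged() pattern. */
struct my_plug_cb {
	struct blk_plug_cb cb;		/* embedded so container_of() can recover us */
	struct bio_list pending;	/* bios batched while the plug is held;
					 * blk_check_plugged() kzalloc's the cb, so the
					 * empty (all-NULL) bio_list is already valid */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *plug = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio = bio_list_get(&plug->pending);

	/* The plug is being flushed: submit everything we batched.
	 * (A real driver, like raid10 above, would check from_schedule and
	 * hand the batch to a worker thread instead of issuing IO here.) */
	while (bio) {
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = next;
	}
	kfree(plug);			/* the callback owns the allocation */
}

static void my_queue_write(void *dev, struct bio *bio)
{
	struct blk_plug_cb *cb;
	struct my_plug_cb *plug;

	/* Returns a callback tied to the current task's plug, or NULL if the
	 * caller is not plugged at all. */
	cb = blk_check_plugged(my_unplug, dev, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct my_plug_cb, cb);
		bio_list_add(&plug->pending, bio);	/* defer until unplug */
	} else {
		generic_make_request(bio);		/* no plug: submit now */
	}
}

In the commit itself, raid10_unplug() additionally handles from_schedule by merging the batch into conf->pending_bio_list and waking the md thread, since submitting IO directly from scheduler context is what the fallback path is meant to avoid.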
