Skip to content

Commit f96975e

Browse files
axboe authored and sfrothwell committed
mpage: add argument structure for do_mpage_readpage()
Patch series "Submit ->readpages() IO as read-ahead", v4. The only caller of ->readpages() is from read-ahead, yet we don't submit IO flagged with REQ_RAHEAD. This means we don't see it in blktrace, for instance, which is a shame. Additionally, it's preventing further functional changes in the block layer for deadling with read-ahead more intelligently. We already make assumptions about ->readpages() just being for read-ahead in the mpage implementation, using readahead_gfp_mask(mapping) as out GFP mask of choice. This small series fixes up mpage_readpages() to submit with REQ_RAHEAD, which takes care of file systems using mpage_readpages(). The first patch is a prep patch, that makes do_mpage_readpage() take an argument structure. This patch (of 4): We're currently passing 8 arguments to this function, clean it up a bit by packing the arguments in an args structure we pass to it. No intentional functional changes in this patch. Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]> Reviewed-by: Andrew Morton <[email protected]> Cc: Al Viro <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Theodore Ts'o <[email protected]> Cc: Chris Mason <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Stephen Rothwell <[email protected]>
1 parent 2b9e517 commit f96975e

File tree

1 file changed

+54
-52
lines changed

1 file changed

+54
-52
lines changed

fs/mpage.c

Lines changed: 54 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,17 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
133133
} while (page_bh != head);
134134
}
135135

136+
struct mpage_readpage_args {
137+
struct bio *bio;
138+
struct page *page;
139+
unsigned nr_pages;
140+
sector_t last_block_in_bio;
141+
struct buffer_head map_bh;
142+
unsigned long first_logical_block;
143+
get_block_t *get_block;
144+
gfp_t gfp;
145+
};
146+
136147
/*
137148
* This is the worker routine which does all the work of mapping the disk
138149
* blocks and constructs largest possible bios, submits them for IO if the
@@ -142,16 +153,14 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
142153
* represent the validity of its disk mapping and to decide when to do the next
143154
* get_block() call.
144155
*/
145-
static struct bio *
146-
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
147-
sector_t *last_block_in_bio, struct buffer_head *map_bh,
148-
unsigned long *first_logical_block, get_block_t get_block,
149-
gfp_t gfp)
156+
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
150157
{
158+
struct page *page = args->page;
151159
struct inode *inode = page->mapping->host;
152160
const unsigned blkbits = inode->i_blkbits;
153161
const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
154162
const unsigned blocksize = 1 << blkbits;
163+
struct buffer_head *map_bh = &args->map_bh;
155164
sector_t block_in_file;
156165
sector_t last_block;
157166
sector_t last_block_in_file;
@@ -168,7 +177,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
168177
goto confused;
169178

170179
block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
171-
last_block = block_in_file + nr_pages * blocks_per_page;
180+
last_block = block_in_file + args->nr_pages * blocks_per_page;
172181
last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
173182
if (last_block > last_block_in_file)
174183
last_block = last_block_in_file;
@@ -178,9 +187,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
178187
* Map blocks using the result from the previous get_blocks call first.
179188
*/
180189
nblocks = map_bh->b_size >> blkbits;
181-
if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
182-
block_in_file < (*first_logical_block + nblocks)) {
183-
unsigned map_offset = block_in_file - *first_logical_block;
190+
if (buffer_mapped(map_bh) && block_in_file > args->first_logical_block &&
191+
block_in_file < (args->first_logical_block + nblocks)) {
192+
unsigned map_offset = block_in_file - args->first_logical_block;
184193
unsigned last = nblocks - map_offset;
185194

186195
for (relative_block = 0; ; relative_block++) {
@@ -208,9 +217,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
208217

209218
if (block_in_file < last_block) {
210219
map_bh->b_size = (last_block-block_in_file) << blkbits;
211-
if (get_block(inode, block_in_file, map_bh, 0))
220+
if (args->get_block(inode, block_in_file, map_bh, 0))
212221
goto confused;
213-
*first_logical_block = block_in_file;
222+
args->first_logical_block = block_in_file;
214223
}
215224

216225
if (!buffer_mapped(map_bh)) {
@@ -273,43 +282,43 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
273282
/*
274283
* This page will go to BIO. Do we need to send this BIO off first?
275284
*/
276-
if (bio && (*last_block_in_bio != blocks[0] - 1))
277-
bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
285+
if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
286+
args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
278287

279288
alloc_new:
280-
if (bio == NULL) {
289+
if (args->bio == NULL) {
281290
if (first_hole == blocks_per_page) {
282291
if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
283292
page))
284293
goto out;
285294
}
286-
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
287-
min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
288-
if (bio == NULL)
295+
args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
296+
min_t(int, args->nr_pages, BIO_MAX_PAGES), args->gfp);
297+
if (args->bio == NULL)
289298
goto confused;
290299
}
291300

292301
length = first_hole << blkbits;
293-
if (bio_add_page(bio, page, length, 0) < length) {
294-
bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
302+
if (bio_add_page(args->bio, page, length, 0) < length) {
303+
args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
295304
goto alloc_new;
296305
}
297306

298-
relative_block = block_in_file - *first_logical_block;
307+
relative_block = block_in_file - args->first_logical_block;
299308
nblocks = map_bh->b_size >> blkbits;
300309
if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
301310
(first_hole != blocks_per_page))
302-
bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
311+
args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
303312
else
304-
*last_block_in_bio = blocks[blocks_per_page - 1];
313+
args->last_block_in_bio = blocks[blocks_per_page - 1];
305314
out:
306-
return bio;
315+
return args->bio;
307316

308317
confused:
309-
if (bio)
310-
bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
318+
if (args->bio)
319+
args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
311320
if (!PageUptodate(page))
312-
block_read_full_page(page, get_block);
321+
block_read_full_page(page, args->get_block);
313322
else
314323
unlock_page(page);
315324
goto out;
@@ -363,34 +372,29 @@ int
363372
mpage_readpages(struct address_space *mapping, struct list_head *pages,
364373
unsigned nr_pages, get_block_t get_block)
365374
{
366-
struct bio *bio = NULL;
375+
struct mpage_readpage_args args = {
376+
.get_block = get_block,
377+
.gfp = readahead_gfp_mask(mapping),
378+
};
367379
unsigned page_idx;
368-
sector_t last_block_in_bio = 0;
369-
struct buffer_head map_bh;
370-
unsigned long first_logical_block = 0;
371-
gfp_t gfp = readahead_gfp_mask(mapping);
372380

373-
map_bh.b_state = 0;
374-
map_bh.b_size = 0;
375381
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
376382
struct page *page = lru_to_page(pages);
377383

378384
prefetchw(&page->flags);
379385
list_del(&page->lru);
380386
if (!add_to_page_cache_lru(page, mapping,
381387
page->index,
382-
gfp)) {
383-
bio = do_mpage_readpage(bio, page,
384-
nr_pages - page_idx,
385-
&last_block_in_bio, &map_bh,
386-
&first_logical_block,
387-
get_block, gfp);
388+
args.gfp)) {
389+
args.page = page;
390+
args.nr_pages = nr_pages - page_idx;
391+
args.bio = do_mpage_readpage(&args);
388392
}
389393
put_page(page);
390394
}
391395
BUG_ON(!list_empty(pages));
392-
if (bio)
393-
mpage_bio_submit(REQ_OP_READ, 0, bio);
396+
if (args.bio)
397+
mpage_bio_submit(REQ_OP_READ, 0, args.bio);
394398
return 0;
395399
}
396400
EXPORT_SYMBOL(mpage_readpages);
@@ -400,18 +404,16 @@ EXPORT_SYMBOL(mpage_readpages);
400404
*/
401405
int mpage_readpage(struct page *page, get_block_t get_block)
402406
{
403-
struct bio *bio = NULL;
404-
sector_t last_block_in_bio = 0;
405-
struct buffer_head map_bh;
406-
unsigned long first_logical_block = 0;
407-
gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
407+
struct mpage_readpage_args args = {
408+
.page = page,
409+
.nr_pages = 1,
410+
.get_block = get_block,
411+
.gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL),
412+
};
408413

409-
map_bh.b_state = 0;
410-
map_bh.b_size = 0;
411-
bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
412-
&map_bh, &first_logical_block, get_block, gfp);
413-
if (bio)
414-
mpage_bio_submit(REQ_OP_READ, 0, bio);
414+
args.bio = do_mpage_readpage(&args);
415+
if (args.bio)
416+
mpage_bio_submit(REQ_OP_READ, 0, args.bio);
415417
return 0;
416418
}
417419
EXPORT_SYMBOL(mpage_readpage);

0 commit comments

Comments
 (0)