From: Bean Huo <[email protected]>

Currently, generic_file_buffered_read() breaks up larger batches of pages
and reads the data one page at a time when ra->ra_pages == 0. This patch
allows it to pass the whole batch of pages down to the device, provided
the device's supported maximum I/O size is >= the requested size.

Signed-off-by: Bean Huo <[email protected]>
---
 mm/filemap.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
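
Notes (below the --- marker, not part of the commit message): for context,
a hedged userspace sketch of the scenario this patch targets. The path
/mnt/testfile and the 1 MiB read size are assumptions for illustration;
readahead is taken to be disabled beforehand, e.g. via
/sys/block/<dev>/queue/read_ahead_kb = 0, which is what leaves
ra->ra_pages == 0 on the file.

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        char *buf = malloc(1024 * 1024);
        int fd = open("/mnt/testfile", O_RDONLY);

        if (fd < 0 || !buf)
                return 1;
        /*
         * A single 1 MiB buffered read: without this patch it is served
         * one page at a time when ra->ra_pages == 0; with it, the whole
         * range can be issued as one batch if bdi->io_pages covers it.
         */
        if (read(fd, buf, 1024 * 1024) < 0)
                return 1;
        close(fd);
        free(buf);
        return 0;
}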

diff --git a/mm/filemap.c b/mm/filemap.c
index 1aaea26556cc..0deec1897817 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2062,6 +2062,7 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
        struct file *filp = iocb->ki_filp;
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
+       struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        struct file_ra_state *ra = &filp->f_ra;
        loff_t *ppos = &iocb->ki_pos;
        pgoff_t index;
@@ -2098,9 +2099,14 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
                if (!page) {
                        if (iocb->ki_flags & IOCB_NOIO)
                                goto would_block;
-                       page_cache_sync_readahead(mapping,
-                                       ra, filp,
-                                       index, last_index - index);
+
+                       if (!ra->ra_pages && bdi->io_pages >= last_index - index)
+                               __do_page_cache_readahead(mapping, filp, index,
+                                                         last_index - index, 0);
+                       else
+                               page_cache_sync_readahead(mapping, ra, filp,
+                                                         index,
+                                                         last_index - index);
                        page = find_get_page(mapping, index);
                        if (unlikely(page == NULL))
                                goto no_cached_page;
-- 
2.17.1
