author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-04-29 11:53:28 -0400
committer  Matthew Wilcox (Oracle) <willy@infradead.org>  2022-05-09 16:28:36 -0400
commit     7e0a126519b82648b254afcd95a168c15f65ea40 (patch)
tree       7848ec4f08bb1c2a0effddb4ffa12a978cfe0e1f /mm
parent     0f312591d656c1d81bf2cf2a5642af478397a5dc (diff)
download   linux-7e0a126519b82648b254afcd95a168c15f65ea40.tar.gz
mm,fs: Remove aops->readpage
With all implementations of aops->readpage converted to aops->read_folio,
we can stop checking whether it's set and remove the member from aops.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
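For context, the read_folio operation that replaces readpage takes a struct folio rather than a struct page, as the converted call sites in the diff below show (read_folio(file, folio) versus readpage(file, &folio->page)). A minimal sketch of an implementation follows for illustration only; it is not part of this commit, and the myfs_* names are invented:

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Hypothetical filesystem: treat every folio as a hole, fill it with
 * zeroes and mark it uptodate.  The read path expects the folio to be
 * unlocked once the read completes, whether it succeeded or not. */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

static const struct address_space_operations myfs_aops = {
	.read_folio	= myfs_read_folio,	/* formerly .readpage */
};

With every filesystem providing read_folio, checks such as the ones simplified in generic_file_mmap(), force_page_cache_ra() and swapon() below no longer need a readpage fallback.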
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    | 15
-rw-r--r--  mm/memory.c     |  4
-rw-r--r--  mm/readahead.c  | 12
-rw-r--r--  mm/shmem.c      |  2
-rw-r--r--  mm/swapfile.c   |  2
5 files changed, 13 insertions(+), 22 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 96e3d7ffd98e..079f8cca7959 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2414,15 +2414,12 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
 
 	/*
 	 * A previous I/O error may have been due to temporary failures,
-	 * eg. multipath errors.  PG_error will be set again if readpage
+	 * eg. multipath errors.  PG_error will be set again if read_folio
 	 * fails.
 	 */
 	folio_clear_error(folio);
 	/* Start the actual read. The read will unlock the page. */
-	if (mapping->a_ops->read_folio)
-		error = mapping->a_ops->read_folio(file, folio);
-	else
-		error = mapping->a_ops->readpage(file, &folio->page);
+	error = mapping->a_ops->read_folio(file, folio);
 	if (error)
 		return error;
 
@@ -2639,7 +2636,7 @@ err:
  * @already_read: Number of bytes already read by the caller.
  *
  * Copies data from the page cache.  If the data is not currently present,
- * uses the readahead and readpage address_space operations to fetch it.
+ * uses the readahead and read_folio address_space operations to fetch it.
  *
  * Return: Total number of bytes copied, including those already read by
  * the caller.  If an error happens before any bytes are copied, returns
@@ -3450,7 +3447,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = file->f_mapping;
 
-	if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage)
+	if (!mapping->a_ops->read_folio)
 		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &generic_file_vm_ops;
@@ -3508,10 +3505,8 @@ repeat:
 filler:
 		if (filler)
 			err = filler(data, &folio->page);
-		else if (mapping->a_ops->read_folio)
-			err = mapping->a_ops->read_folio(data, folio);
 		else
-			err = mapping->a_ops->readpage(data, &folio->page);
+			err = mapping->a_ops->read_folio(data, folio);
 
 		if (err < 0) {
 			folio_put(folio);
diff --git a/mm/memory.c b/mm/memory.c
index 76e3af9639d9..2a12028a3749 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -555,11 +555,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 		dump_page(page, "bad pte");
 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
-	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
+	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
 		 vma->vm_file,
 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
-		 mapping ? mapping->a_ops->readpage : NULL);
+		 mapping ? mapping->a_ops->read_folio : NULL);
 	dump_stack();
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
diff --git a/mm/readahead.c b/mm/readahead.c
index 76024c20a5a5..39983a3a93f0 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -170,12 +170,9 @@ static void read_pages(struct readahead_control *rac)
 			}
 			folio_unlock(folio);
 		}
-	} else if (aops->read_folio) {
-		while ((folio = readahead_folio(rac)) != NULL)
-			aops->read_folio(rac->file, folio);
 	} else {
 		while ((folio = readahead_folio(rac)) != NULL)
-			aops->readpage(rac->file, &folio->page);
+			aops->read_folio(rac->file, folio);
 	}
 
 	blk_finish_plug(&plug);
@@ -256,8 +253,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	}
 
 	/*
-	 * Now start the IO.  We ignore I/O errors - if the page is not
-	 * uptodate then the caller will launch readpage again, and
+	 * Now start the IO.  We ignore I/O errors - if the folio is not
+	 * uptodate then the caller will launch read_folio again, and
 	 * will then handle the error.
 	 */
 	read_pages(ractl);
@@ -305,8 +302,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
 
-	if (unlikely(!mapping->a_ops->read_folio &&
-		     !mapping->a_ops->readpage && !mapping->a_ops->readahead))
+	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
 		return;
 
 	/*
diff --git a/mm/shmem.c b/mm/shmem.c
index 0f557a512171..f3e8de8ff75c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4162,7 +4162,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
  *
  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
  * with any new page allocations done using the specified allocation flags.
- * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * But read_cache_page_gfp() uses the ->read_folio() method: which does not
  * suit tmpfs, since it may have pages in swapcache, and needs to find those
  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
  *
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7c19098b8b45..ecd45bdbad9b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3041,7 +3041,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	/*
 	 * Read the swap header.
 	 */
-	if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) {
+	if (!mapping->a_ops->read_folio) {
 		error = -EINVAL;
 		goto bad_swap_unlock_inode;
 	}