From c1e8d7c6a7a682e1405e3e242d32fc377fd196ff Mon Sep 17 00:00:00 2001
From: Michel Lespinasse
Date: Mon, 8 Jun 2020 21:33:54 -0700
Subject: mmap locking API: convert mmap_sem comments

Convert comments that reference mmap_sem to reference mmap_lock instead.

[akpm@linux-foundation.org: fix up linux-next leftovers]
[akpm@linux-foundation.org: s/lockaphore/lock/, per Vlastimil]
[akpm@linux-foundation.org: more linux-next fixups, per Michel]

Signed-off-by: Michel Lespinasse
Signed-off-by: Andrew Morton
Reviewed-by: Vlastimil Babka
Reviewed-by: Daniel Jordan
Cc: Davidlohr Bueso
Cc: David Rientjes
Cc: Hugh Dickins
Cc: Jason Gunthorpe
Cc: Jerome Glisse
Cc: John Hubbard
Cc: Laurent Dufour
Cc: Liam Howlett
Cc: Matthew Wilcox
Cc: Peter Zijlstra
Cc: Ying Han
Link: http://lkml.kernel.org/r/20200520052908.204642-13-walken@google.com
Signed-off-by: Linus Torvalds
---
 drivers/vfio/pci/vfio_pci.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b5f6ef2d12f6..7c0779018b1b 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1185,7 +1185,7 @@ reset_info_exit:
 
 	/*
 	 * We need to get memory_lock for each device, but devices
-	 * can share mmap_sem, therefore we need to zap and hold
+	 * can share mmap_lock, therefore we need to zap and hold
	 * the vma_lock for each device, and only then get each
	 * memory_lock.
	 */
@@ -1375,26 +1375,26 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
 
 	/*
	 * Lock ordering:
-	 * vma_lock is nested under mmap_sem for vm_ops callback paths.
+	 * vma_lock is nested under mmap_lock for vm_ops callback paths.
	 * The memory_lock semaphore is used by both code paths calling
	 * into this function to zap vmas and the vm_ops.fault callback
	 * to protect the memory enable state of the device.
	 *
-	 * When zapping vmas we need to maintain the mmap_sem => vma_lock
+	 * When zapping vmas we need to maintain the mmap_lock => vma_lock
	 * ordering, which requires using vma_lock to walk vma_list to
-	 * acquire an mm, then dropping vma_lock to get the mmap_sem and
+	 * acquire an mm, then dropping vma_lock to get the mmap_lock and
	 * reacquiring vma_lock. This logic is derived from similar
	 * requirements in uverbs_user_mmap_disassociate().
	 *
-	 * mmap_sem must always be the top-level lock when it is taken.
+	 * mmap_lock must always be the top-level lock when it is taken.
	 * Therefore we can only hold the memory_lock write lock when
-	 * vma_list is empty, as we'd need to take mmap_sem to clear
+	 * vma_list is empty, as we'd need to take mmap_lock to clear
	 * entries. vma_list can only be guaranteed empty when holding
	 * vma_lock, thus memory_lock is nested under vma_lock.
	 *
	 * This enables the vm_ops.fault callback to acquire vma_lock,
	 * followed by memory_lock read lock, while already holding
-	 * mmap_sem without risk of deadlock.
+	 * mmap_lock without risk of deadlock.
	 */
	while (1) {
		struct mm_struct *mm = NULL;
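
The lock-ordering comment above describes a drop-and-reacquire pattern: code that
starts out holding vma_lock but needs the top-level mmap_lock must release
vma_lock, take mmap_lock, retake vma_lock, and then re-validate its state, since
the list may have changed in the unlocked window. Below is a minimal userspace
sketch of that pattern using pthreads. All names here (mmap_lock_analogue,
vma_lock_analogue, vma_count, zap_all) are invented for illustration and are not
the kernel's actual implementation in vfio_pci_zap_and_vma_lock().

/* Build with: cc -o zap zap.c -lpthread */
#include <pthread.h>
#include <stdio.h>

/* Ordering rule: mmap_lock_analogue must be taken before vma_lock_analogue. */
static pthread_mutex_t mmap_lock_analogue = PTHREAD_MUTEX_INITIALIZER; /* top-level */
static pthread_mutex_t vma_lock_analogue  = PTHREAD_MUTEX_INITIALIZER; /* nested */
static int vma_count = 3; /* stand-in for vma_list */

static void zap_all(void)
{
	while (1) {
		pthread_mutex_lock(&vma_lock_analogue);
		if (vma_count == 0) {
			/* List observed empty while holding the nested lock: done. */
			pthread_mutex_unlock(&vma_lock_analogue);
			break;
		}
		/*
		 * We now need the top-level lock, but taking it here would
		 * invert the ordering. Drop the nested lock first.
		 */
		pthread_mutex_unlock(&vma_lock_analogue);

		pthread_mutex_lock(&mmap_lock_analogue); /* top-level lock first */
		pthread_mutex_lock(&vma_lock_analogue);  /* then the nested lock */

		/* Re-validate: the list may have changed while we held neither lock. */
		if (vma_count > 0) {
			vma_count--; /* stand-in for zapping one entry's mappings */
			printf("zapped one entry, %d left\n", vma_count);
		}

		pthread_mutex_unlock(&vma_lock_analogue);
		pthread_mutex_unlock(&mmap_lock_analogue);
	}
}

int main(void)
{
	zap_all();
	return 0;
}

The re-check after reacquiring both locks is the essential step: it is what makes
dropping the nested lock safe, at the cost of looping until the list is seen
empty under the lock.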