-rw-r--r--	drivers/char/mspec.c	69
1 file changed, 48 insertions, 21 deletions
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index c08a4152ee8f..049a46cc9f87 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -67,7 +67,7 @@
 /*
  * Page types allocated by the device.
  */
-enum {
+enum mspec_page_type {
 	MSPEC_FETCHOP = 1,
 	MSPEC_CACHED,
 	MSPEC_UNCACHED
@@ -83,15 +83,25 @@ static int is_sn2;
  * One of these structures is allocated when an mspec region is mmaped. The
  * structure is pointed to by the vma->vm_private_data field in the vma struct.
  * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vmas that are split off from the
+ * original vma when split_vma() is called.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect it in the fork case, where multiple tasks share the vma_data.
  */
 struct vma_data {
 	atomic_t refcnt;	/* Number of vmas sharing the data. */
-	spinlock_t lock;	/* Serialize access to the vma. */
+	spinlock_t lock;	/* Serialize access to this structure. */
 	int count;		/* Number of pages allocated. */
-	int type;		/* Type of pages allocated. */
+	enum mspec_page_type type; /* Type of pages allocated. */
+	int flags;		/* See VMD_xxx below. */
+	unsigned long vm_start;	/* Original (unsplit) base. */
+	unsigned long vm_end;	/* Original (unsplit) end. */
 	unsigned long maddr[0];	/* Array of MSPEC addresses. */
 };
 
+#define VMD_VMALLOCED 0x1	/* vmalloc'd rather than kmalloc'd */
+
 /* used on shub2 to clear FOP cache in the HUB */
 static unsigned long scratch_page[MAX_NUMNODES];
 #define SH2_AMO_CACHE_ENTRIES	4
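Note: the vma_data changes above are the heart of the patch: one structure
now describes the original, unsplit mapping, and every vma produced by
split_vma() points at it. A minimal sketch of the indexing this enables
(the helper name is hypothetical, not part of the patch):

	/* Because all split-off vmas share one vma_data, a page slot is
	 * always computed against the original (unsplit) base recorded in
	 * vdata->vm_start, never against the piece's own vma->vm_start. */
	static inline int mspec_page_index(struct vma_data *vdata,
					   unsigned long addr)
	{
		return (addr - vdata->vm_start) >> PAGE_SHIFT;
	}
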
@@ -129,8 +139,8 @@ mspec_zero_block(unsigned long addr, int len)
  * mspec_open
  *
  * Called when a device mapping is created by a means other than mmap
- * (via fork, etc.).  Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, munmap, etc.).  Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
  */
 static void
 mspec_open(struct vm_area_struct *vma)
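Note: mspec_open() runs on fork and on each new piece created by
split_vma(). The hunk shows only the prologue; a sketch of the assumed
handler body, illustrating why the count must be atomic:

	static void
	mspec_open(struct vm_area_struct *vma)
	{
		struct vma_data *vdata = vma->vm_private_data;

		/* Forked tasks can reach here without a common mm->mmap_sem,
		 * so a non-atomic increment could be lost. */
		atomic_inc(&vdata->refcnt);
	}
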
@@ -151,34 +161,44 @@ static void
 mspec_close(struct vm_area_struct *vma)
 {
 	struct vma_data *vdata;
-	int i, pages, result, vdata_size;
+	int index, last_index, result;
+	unsigned long my_page;
 
 	vdata = vma->vm_private_data;
-	if (!atomic_dec_and_test(&vdata->refcnt))
-		return;
 
-	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
-	for (i = 0; i < pages; i++) {
-		if (vdata->maddr[i] == 0)
+	BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+
+	spin_lock(&vdata->lock);
+	index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
+	last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+	for (; index < last_index; index++) {
+		if (vdata->maddr[index] == 0)
 			continue;
 		/*
 		 * Clear the page before sticking it back
 		 * into the pool.
 		 */
-		result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
+		my_page = vdata->maddr[index];
+		vdata->maddr[index] = 0;
+		spin_unlock(&vdata->lock);
+		result = mspec_zero_block(my_page, PAGE_SIZE);
 		if (!result)
-			uncached_free_page(vdata->maddr[i]);
+			uncached_free_page(my_page);
 		else
 			printk(KERN_WARNING "mspec_close(): "
 			       "failed to zero page %i\n",
 			       result);
+		spin_lock(&vdata->lock);
 	}
+	spin_unlock(&vdata->lock);
 
-	if (vdata_size <= PAGE_SIZE)
-		kfree(vdata);
-	else
+	if (!atomic_dec_and_test(&vdata->refcnt))
+		return;
+
+	if (vdata->flags & VMD_VMALLOCED)
 		vfree(vdata);
+	else
+		kfree(vdata);
 }
 
 
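Note: the rewritten loop claims each page slot under vdata->lock, then
drops the lock around the potentially slow zero-and-free so that tasks
closing sibling vmas are not serialized behind it. Reduced to its
essentials (a sketch of the pattern, not the patch itself; index and
last_index are computed from the piece's offsets as above):

	spin_lock(&vdata->lock);
	for (; index < last_index; index++) {
		unsigned long page = vdata->maddr[index];

		if (page == 0)
			continue;
		vdata->maddr[index] = 0;	/* claim the slot under the lock */
		spin_unlock(&vdata->lock);	/* slow work runs unlocked */
		/* ... zero and free 'page' ... */
		spin_lock(&vdata->lock);
	}
	spin_unlock(&vdata->lock);

Only the last closer (atomic_dec_and_test) frees vdata itself, and the
VMD_VMALLOCED flag tells it whether vfree() or kfree() is required.
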
@@ -195,7 +215,8 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
 	int index;
 	struct vma_data *vdata = vma->vm_private_data;
 
-	index = (address - vma->vm_start) >> PAGE_SHIFT;
+	BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
+	index = (address - vdata->vm_start) >> PAGE_SHIFT;
 	maddr = (volatile unsigned long) vdata->maddr[index];
 	if (maddr == 0) {
 		maddr = uncached_alloc_page(numa_node_id());
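Note: with the shared vma_data, a fault at a given address resolves to the
same maddr[] slot no matter which split-off piece it lands in. For a piece
whose vma->vm_start sits 4 pages into the original mapping, for example:

	/* a fault one page into this piece still maps to slot 5 */
	index = (address - vdata->vm_start) >> PAGE_SHIFT;	/* == 5 */
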
@@ -237,10 +258,11 @@ static struct vm_operations_struct mspec_vm_ops = {
  * underlying pages.
  */
 static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+					enum mspec_page_type type)
 {
 	struct vma_data *vdata;
-	int pages, vdata_size;
+	int pages, vdata_size, flags = 0;
 
 	if (vma->vm_pgoff != 0)
 		return -EINVAL;
@@ -255,12 +277,17 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
 	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
 	if (vdata_size <= PAGE_SIZE)
 		vdata = kmalloc(vdata_size, GFP_KERNEL);
-	else
+	else {
 		vdata = vmalloc(vdata_size);
+		flags = VMD_VMALLOCED;
+	}
 	if (!vdata)
 		return -ENOMEM;
 	memset(vdata, 0, vdata_size);
 
+	vdata->vm_start = vma->vm_start;
+	vdata->vm_end = vma->vm_end;
+	vdata->flags = flags;
 	vdata->type = type;
 	spin_lock_init(&vdata->lock);
 	vdata->refcnt = ATOMIC_INIT(1);
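
Taken together, the patch makes shrinking (partially unmapping) an mspec
mapping safe. An illustration of the scenario it fixes, as a hypothetical
user program ('fd' is an open mspec device, 'pgsz' the system page size):

	char *base = mmap(NULL, 16 * pgsz, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);	/* one vma, refcnt == 1 */

	/* Splits the vma: ->open() runs on the new pieces and ->close()
	 * on the unmapped middle, so the pieces share one vma_data. */
	munmap(base + 4 * pgsz, 4 * pgsz);

Before this change, mspec_close() indexed maddr[] relative to each piece's
own vm_start and freed pages only once the refcnt reached zero, so a split
could mis-index or leak pages; recording the original vm_start/vm_end in
vma_data and releasing each piece's own range fixes both.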