author    NeilBrown <neilb@suse.de>  2011-10-11 16:48:43 +1100
committer NeilBrown <neilb@suse.de>  2011-10-11 16:48:43 +1100
commit    9f2c9d12bcc53fcb3b787023723754e84d1aef8b (patch)
tree      85fbc63c6e480bdebe06529e1af4d3d483d70452 /drivers
parent    2b8bf3451d1e3133ebc3998721d14013a6c27114 (diff)
download  linux-9f2c9d12bcc53fcb3b787023723754e84d1aef8b.tar.gz
md: remove typedefs: r10bio_t -> struct r10bio and r1bio_t -> struct r1bio
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/raid1.c  | 60
-rw-r--r--  drivers/md/raid1.h  | 15
-rw-r--r--  drivers/md/raid10.c | 68
-rw-r--r--  drivers/md/raid10.h |  4
4 files changed, 71 insertions(+), 76 deletions(-)
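
A minimal sketch of the pattern this patch applies, shown outside the diff for clarity (illustrative only; the helper names below are hypothetical and not taken from the patch). Kernel coding style discourages typedefs for structures, so the opaque alias is dropped and the struct tag is spelled out at each use:

/* Before: a forward typedef hides the struct tag at every call site. */
struct r1bio_s;
typedef struct r1bio_s r1bio_t;

void put_bio_old(r1bio_t *r1_bio);		/* callers see only the alias */

/* After: the tag is used directly; for pointer parameters a plain
 * forward declaration of the struct is enough, no typedef needed.
 */
struct r1bio;

void put_bio_new(struct r1bio *r1_bio);	/* the type is explicit at a glance */

The same mechanical substitution (r1bio_t -> struct r1bio, r10bio_t -> struct r10bio) accounts for the bulk of the hunks below; the raid1.h and raid10.h hunks additionally delete the now-unused typedef lines and rename the struct tags from r1bio_s/r10bio_s to r1bio/r10bio.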
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 50bd7c9411b9..6022111a4b28 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -52,7 +52,7 @@ static void lower_barrier(conf_t *conf);
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
-	int size = offsetof(r1bio_t, bios[pi->raid_disks]);
+	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
 
 	/* allocate a r1bio with room for raid_disks entries in the bios array */
 	return kzalloc(size, gfp_flags);
@@ -73,7 +73,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
 	struct page *page;
-	r1bio_t *r1_bio;
+	struct r1bio *r1_bio;
 	struct bio *bio;
 	int i, j;
 
@@ -139,7 +139,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
 {
 	struct pool_info *pi = data;
 	int i,j;
-	r1bio_t *r1bio = __r1_bio;
+	struct r1bio *r1bio = __r1_bio;
 
 	for (i = 0; i < RESYNC_PAGES; i++)
 		for (j = pi->raid_disks; j-- ;) {
@@ -154,7 +154,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
 	r1bio_pool_free(r1bio, data);
 }
 
-static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
+static void put_all_bios(conf_t *conf, struct r1bio *r1_bio)
 {
 	int i;
 
@@ -166,7 +166,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
 	}
 }
 
-static void free_r1bio(r1bio_t *r1_bio)
+static void free_r1bio(struct r1bio *r1_bio)
 {
 	conf_t *conf = r1_bio->mddev->private;
 
@@ -174,7 +174,7 @@ static void free_r1bio(r1bio_t *r1_bio)
 	mempool_free(r1_bio, conf->r1bio_pool);
 }
 
-static void put_buf(r1bio_t *r1_bio)
+static void put_buf(struct r1bio *r1_bio)
 {
 	conf_t *conf = r1_bio->mddev->private;
 	int i;
@@ -190,7 +190,7 @@ static void put_buf(r1bio_t *r1_bio)
 	lower_barrier(conf);
 }
 
-static void reschedule_retry(r1bio_t *r1_bio)
+static void reschedule_retry(struct r1bio *r1_bio)
 {
 	unsigned long flags;
 	struct mddev *mddev = r1_bio->mddev;
@@ -210,7 +210,7 @@ static void reschedule_retry(r1bio_t *r1_bio)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
-static void call_bio_endio(r1bio_t *r1_bio)
+static void call_bio_endio(struct r1bio *r1_bio)
 {
 	struct bio *bio = r1_bio->master_bio;
 	int done;
@@ -237,7 +237,7 @@ static void call_bio_endio(r1bio_t *r1_bio)
 	}
 }
 
-static void raid_end_bio_io(r1bio_t *r1_bio)
+static void raid_end_bio_io(struct r1bio *r1_bio)
 {
 	struct bio *bio = r1_bio->master_bio;
 
@@ -257,7 +257,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
 /*
  * Update disk head position estimator based on IRQ completion info.
  */
-static inline void update_head_pos(int disk, r1bio_t *r1_bio)
+static inline void update_head_pos(int disk, struct r1bio *r1_bio)
 {
 	conf_t *conf = r1_bio->mddev->private;
 
@@ -268,7 +268,7 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
 /*
  * Find the disk number which triggered given bio
  */
-static int find_bio_disk(r1bio_t *r1_bio, struct bio *bio)
+static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 {
 	int mirror;
 	int raid_disks = r1_bio->mddev->raid_disks;
@@ -286,7 +286,7 @@ static int find_bio_disk(r1bio_t *r1_bio, struct bio *bio)
 static void raid1_end_read_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r1bio_t *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = bio->bi_private;
 	int mirror;
 	conf_t *conf = r1_bio->mddev->private;
 
@@ -333,7 +333,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
 	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 }
 
-static void close_write(r1bio_t *r1_bio)
+static void close_write(struct r1bio *r1_bio)
 {
 	/* it really is the end of this request */
 	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
@@ -352,7 +352,7 @@ static void close_write(r1bio_t *r1_bio)
 	md_write_end(r1_bio->mddev);
 }
 
-static void r1_bio_write_done(r1bio_t *r1_bio)
+static void r1_bio_write_done(struct r1bio *r1_bio)
 {
 	if (!atomic_dec_and_test(&r1_bio->remaining))
 		return;
@@ -371,7 +371,7 @@ static void r1_bio_write_done(r1bio_t *r1_bio)
 static void raid1_end_write_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r1bio_t *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = bio->bi_private;
 	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 	conf_t *conf = r1_bio->mddev->private;
 	struct bio *to_put = NULL;
@@ -466,7 +466,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
  *
  * The rdev for the device selected will have nr_pending incremented.
  */
-static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
+static int read_balance(conf_t *conf, struct r1bio *r1_bio, int *max_sectors)
 {
 	const sector_t this_sector = r1_bio->sector;
 	int sectors;
@@ -764,7 +764,7 @@ static void unfreeze_array(conf_t *conf)
 
 /* duplicate the data pages for behind I/O 
  */
-static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
+static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
 {
 	int i;
 	struct bio_vec *bvec;
@@ -800,7 +800,7 @@ static int make_request(struct mddev *mddev, struct bio * bio)
 {
 	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
-	r1bio_t *r1_bio;
+	struct r1bio *r1_bio;
 	struct bio *read_bio;
 	int i, disks;
 	struct bitmap *bitmap;
@@ -1354,7 +1354,7 @@ abort:
 
 static void end_sync_read(struct bio *bio, int error)
 {
-	r1bio_t *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = bio->bi_private;
 
 	update_head_pos(r1_bio->read_disk, r1_bio);
 
@@ -1373,7 +1373,7 @@ static void end_sync_read(struct bio *bio, int error)
 static void end_sync_write(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r1bio_t *r1_bio = bio->bi_private;
+	struct r1bio *r1_bio = bio->bi_private;
 	struct mddev *mddev = r1_bio->mddev;
 	conf_t *conf = mddev->private;
 	int mirror=0;
@@ -1433,7 +1433,7 @@ static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
 	return 0;
 }
 
-static int fix_sync_read_error(r1bio_t *r1_bio)
+static int fix_sync_read_error(struct r1bio *r1_bio)
 {
 	/* Try some synchronous reads of other devices to get
 	 * good data, much like with normal read errors.  Only
@@ -1553,7 +1553,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
 	return 1;
 }
 
-static int process_checks(r1bio_t *r1_bio)
+static int process_checks(struct r1bio *r1_bio)
 {
 	/* We have read all readable devices.  If we haven't
 	 * got the block, then there is no hope left.
@@ -1635,7 +1635,7 @@ static int process_checks(r1bio_t *r1_bio)
 	return 0;
 }
 
-static void sync_request_write(struct mddev *mddev, r1bio_t *r1_bio)
+static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 {
 	conf_t *conf = mddev->private;
 	int i;
@@ -1790,7 +1790,7 @@ static int submit_bio_wait(int rw, struct bio *bio)
 	return test_bit(BIO_UPTODATE, &bio->bi_flags);
 }
 
-static int narrow_write_error(r1bio_t *r1_bio, int i)
+static int narrow_write_error(struct r1bio *r1_bio, int i)
 {
 	struct mddev *mddev = r1_bio->mddev;
 	conf_t *conf = mddev->private;
@@ -1866,7 +1866,7 @@ static int narrow_write_error(r1bio_t *r1_bio, int i)
 	return ok;
 }
 
-static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
+static void handle_sync_write_finished(conf_t *conf, struct r1bio *r1_bio)
 {
 	int m;
 	int s = r1_bio->sectors;
@@ -1889,7 +1889,7 @@ static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
 	md_done_sync(conf->mddev, s, 1);
 }
 
-static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
+static void handle_write_finished(conf_t *conf, struct r1bio *r1_bio)
 {
 	int m;
 	for (m = 0; m < conf->raid_disks ; m++)
@@ -1918,7 +1918,7 @@ static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
 	raid_end_bio_io(r1_bio);
 }
 
-static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
+static void handle_read_error(conf_t *conf, struct r1bio *r1_bio)
 {
 	int disk;
 	int max_sectors;
@@ -2010,7 +2010,7 @@ read_more:
 
 static void raid1d(struct mddev *mddev)
 {
-	r1bio_t *r1_bio;
+	struct r1bio *r1_bio;
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
@@ -2029,7 +2029,7 @@ static void raid1d(struct mddev *mddev)
 			spin_unlock_irqrestore(&conf->device_lock, flags);
 			break;
 		}
-		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
+		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
 		list_del(head->prev);
 		conf->nr_queued--;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -2088,7 +2088,7 @@ static int init_resync(conf_t *conf)
 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
 	conf_t *conf = mddev->private;
-	r1bio_t *r1_bio;
+	struct r1bio *r1_bio;
 	struct bio *bio;
 	sector_t max_sector, nr_sectors;
 	int disk = -1;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 07e9cb91186b..d59e4676a1b0 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -21,9 +21,6 @@ struct pool_info {
 	int	raid_disks;
 };
 
-
-typedef struct r1bio_s r1bio_t;
-
 struct r1_private_data_s {
 	struct mddev		*mddev;
 	mirror_info_t		*mirrors;
@@ -43,9 +40,9 @@ struct r1_private_data_s {
 
 	spinlock_t		device_lock;
 
-	/* list of 'r1bio_t' that need to be processed by raid1d, whether
-	 * to retry a read, writeout a resync or recovery block, or
-	 * anything else.
+	/* list of 'struct r1bio' that need to be processed by raid1d,
+	 * whether to retry a read, writeout a resync or recovery
+	 * block, or anything else.
 	 */
 	struct list_head	retry_list;
 
@@ -80,8 +77,8 @@ struct r1_private_data_s {
 	 * mempools - it changes when the array grows or shrinks
 	 */
 	struct pool_info	*poolinfo;
-	mempool_t *r1bio_pool;
-	mempool_t *r1buf_pool;
+	mempool_t		*r1bio_pool;
+	mempool_t		*r1buf_pool;
 
 	/* temporary buffer to synchronous IO when attempting to repair
 	 * a read error.
@@ -104,7 +101,7 @@ typedef struct r1_private_data_s conf_t;
  * for this RAID1 operation, and about their status:
  */
 
-struct r1bio_s {
+struct r1bio {
 	atomic_t		remaining; /* 'have we finished' count,
 					    * used from IRQ handlers
 					    */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8c3bd6061f96..6927998e253a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -64,7 +64,7 @@ static void lower_barrier(conf_t *conf);
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	conf_t *conf = data;
-	int size = offsetof(struct r10bio_s, devs[conf->copies]);
+	int size = offsetof(struct r10bio, devs[conf->copies]);
 
 	/* allocate a r10bio with room for raid_disks entries in the bios array */
 	return kzalloc(size, gfp_flags);
@@ -94,7 +94,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	conf_t *conf = data;
 	struct page *page;
-	r10bio_t *r10_bio;
+	struct r10bio *r10_bio;
 	struct bio *bio;
 	int i, j;
 	int nalloc;
@@ -159,7 +159,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
 {
 	int i;
 	conf_t *conf = data;
-	r10bio_t *r10bio = __r10_bio;
+	struct r10bio *r10bio = __r10_bio;
 	int j;
 
 	for (j=0; j < conf->copies; j++) {
@@ -175,7 +175,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
 	r10bio_pool_free(r10bio, conf);
 }
 
-static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
+static void put_all_bios(conf_t *conf, struct r10bio *r10_bio)
 {
 	int i;
 
@@ -187,7 +187,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
 	}
 }
 
-static void free_r10bio(r10bio_t *r10_bio)
+static void free_r10bio(struct r10bio *r10_bio)
 {
 	conf_t *conf = r10_bio->mddev->private;
 
@@ -195,7 +195,7 @@ static void free_r10bio(r10bio_t *r10_bio)
 	mempool_free(r10_bio, conf->r10bio_pool);
 }
 
-static void put_buf(r10bio_t *r10_bio)
+static void put_buf(struct r10bio *r10_bio)
 {
 	conf_t *conf = r10_bio->mddev->private;
 
@@ -204,7 +204,7 @@ static void put_buf(r10bio_t *r10_bio)
 	lower_barrier(conf);
 }
 
-static void reschedule_retry(r10bio_t *r10_bio)
+static void reschedule_retry(struct r10bio *r10_bio)
 {
 	unsigned long flags;
 	struct mddev *mddev = r10_bio->mddev;
@@ -226,7 +226,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
-static void raid_end_bio_io(r10bio_t *r10_bio)
+static void raid_end_bio_io(struct r10bio *r10_bio)
 {
 	struct bio *bio = r10_bio->master_bio;
 	int done;
@@ -256,7 +256,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
 /*
  * Update disk head position estimator based on IRQ completion info.
  */
-static inline void update_head_pos(int slot, r10bio_t *r10_bio)
+static inline void update_head_pos(int slot, struct r10bio *r10_bio)
 {
 	conf_t *conf = r10_bio->mddev->private;
 
@@ -267,7 +267,7 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
 /*
  * Find the disk number which triggered given bio
  */
-static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
+static int find_bio_disk(conf_t *conf, struct r10bio *r10_bio,
 			 struct bio *bio, int *slotp)
 {
 	int slot;
@@ -287,7 +287,7 @@ static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
 static void raid10_end_read_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r10bio_t *r10_bio = bio->bi_private;
+	struct r10bio *r10_bio = bio->bi_private;
 	int slot, dev;
 	conf_t *conf = r10_bio->mddev->private;
 
@@ -327,7 +327,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
 	}
 }
 
-static void close_write(r10bio_t *r10_bio)
+static void close_write(struct r10bio *r10_bio)
 {
 	/* clear the bitmap if all writes complete successfully */
 	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
@@ -337,7 +337,7 @@ static void close_write(r10bio_t *r10_bio)
 	md_write_end(r10_bio->mddev);
 }
 
-static void one_write_done(r10bio_t *r10_bio)
+static void one_write_done(struct r10bio *r10_bio)
 {
 	if (atomic_dec_and_test(&r10_bio->remaining)) {
 		if (test_bit(R10BIO_WriteError, &r10_bio->state))
@@ -355,7 +355,7 @@ static void one_write_done(r10bio_t *r10_bio)
 static void raid10_end_write_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r10bio_t *r10_bio = bio->bi_private;
+	struct r10bio *r10_bio = bio->bi_private;
 	int dev;
 	int dec_rdev = 1;
 	conf_t *conf = r10_bio->mddev->private;
@@ -433,7 +433,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
  * sector offset to a virtual address
  */
 
-static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
+static void raid10_find_phys(conf_t *conf, struct r10bio *r10bio)
 {
 	int n,f;
 	sector_t sector;
@@ -555,7 +555,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
  * FIXME: possibly should rethink readbalancing and do it differently
  * depending on near_copies / far_copies geometry.
  */
-static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
+static int read_balance(conf_t *conf, struct r10bio *r10_bio, int *max_sectors)
 {
 	const sector_t this_sector = r10_bio->sector;
 	int disk, slot;
@@ -834,7 +834,7 @@ static int make_request(struct mddev *mddev, struct bio * bio)
 {
 	conf_t *conf = mddev->private;
 	mirror_info_t *mirror;
-	r10bio_t *r10_bio;
+	struct r10bio *r10_bio;
 	struct bio *read_bio;
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
@@ -1411,7 +1411,7 @@ abort:
 
 static void end_sync_read(struct bio *bio, int error)
 {
-	r10bio_t *r10_bio = bio->bi_private;
+	struct r10bio *r10_bio = bio->bi_private;
 	conf_t *conf = r10_bio->mddev->private;
 	int d;
 
@@ -1439,7 +1439,7 @@ static void end_sync_read(struct bio *bio, int error)
 	}
 }
 
-static void end_sync_request(r10bio_t *r10_bio)
+static void end_sync_request(struct r10bio *r10_bio)
 {
 	struct mddev *mddev = r10_bio->mddev;
 
@@ -1455,7 +1455,7 @@ static void end_sync_request(r10bio_t *r10_bio)
 			md_done_sync(mddev, s, 1);
 			break;
 		} else {
-			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
+			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
 			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
 			    test_bit(R10BIO_WriteError, &r10_bio->state))
 				reschedule_retry(r10_bio);
@@ -1469,7 +1469,7 @@ static void end_sync_request(r10bio_t *r10_bio)
 static void end_sync_write(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	r10bio_t *r10_bio = bio->bi_private;
+	struct r10bio *r10_bio = bio->bi_private;
 	struct mddev *mddev = r10_bio->mddev;
 	conf_t *conf = mddev->private;
 	int d;
@@ -1509,7 +1509,7 @@ static void end_sync_write(struct bio *bio, int error)
  * We check if all blocks are in-sync and only write to blocks that
  * aren't in sync
  */
-static void sync_request_write(struct mddev *mddev, r10bio_t *r10_bio)
+static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 {
 	conf_t *conf = mddev->private;
 	int i, first;
@@ -1609,7 +1609,7 @@ done:
  * The second for writing.
  *
  */
-static void fix_recovery_read_error(r10bio_t *r10_bio)
+static void fix_recovery_read_error(struct r10bio *r10_bio)
 {
 	/* We got a read error during recovery.
 	 * We repeat the read in smaller page-sized sections.
@@ -1688,7 +1688,7 @@ static void fix_recovery_read_error(r10bio_t *r10_bio)
 	}
 }
 
-static void recovery_request_write(struct mddev *mddev, r10bio_t *r10_bio)
+static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 {
 	conf_t *conf = mddev->private;
 	int d;
@@ -1778,7 +1778,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
  *	3.	Performs writes following reads for array synchronising.
  */
 
-static void fix_read_error(conf_t *conf, struct mddev *mddev, r10bio_t *r10_bio)
+static void fix_read_error(conf_t *conf, struct mddev *mddev, struct r10bio *r10_bio)
 {
 	int sect = 0; /* Offset from r10_bio->sector */
 	int sectors = r10_bio->sectors;
@@ -1983,7 +1983,7 @@ static int submit_bio_wait(int rw, struct bio *bio)
 	return test_bit(BIO_UPTODATE, &bio->bi_flags);
 }
 
-static int narrow_write_error(r10bio_t *r10_bio, int i)
+static int narrow_write_error(struct r10bio *r10_bio, int i)
 {
 	struct bio *bio = r10_bio->master_bio;
 	struct mddev *mddev = r10_bio->mddev;
@@ -2040,7 +2040,7 @@ static int narrow_write_error(r10bio_t *r10_bio, int i)
 	return ok;
 }
 
-static void handle_read_error(struct mddev *mddev, r10bio_t *r10_bio)
+static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 {
 	int slot = r10_bio->read_slot;
 	int mirror = r10_bio->devs[slot].devnum;
@@ -2139,7 +2139,7 @@ read_more:
 		generic_make_request(bio);
 }
 
-static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
+static void handle_write_completed(conf_t *conf, struct r10bio *r10_bio)
 {
 	/* Some sort of write request has finished and it
 	 * succeeded in writing where we thought there was a
@@ -2202,7 +2202,7 @@ static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
 
 static void raid10d(struct mddev *mddev)
 {
-	r10bio_t *r10_bio;
+	struct r10bio *r10_bio;
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
@@ -2220,7 +2220,7 @@ static void raid10d(struct mddev *mddev)
 			spin_unlock_irqrestore(&conf->device_lock, flags);
 			break;
 		}
-		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
+		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
 		list_del(head->prev);
 		conf->nr_queued--;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -2301,7 +2301,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			     int *skipped, int go_faster)
 {
 	conf_t *conf = mddev->private;
-	r10bio_t *r10_bio;
+	struct r10bio *r10_bio;
 	struct bio *biolist = NULL, *bio;
 	sector_t max_sector, nr_sectors;
 	int i;
@@ -2393,7 +2393,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 
 		for (i=0 ; i<conf->raid_disks; i++) {
 			int still_degraded;
-			r10bio_t *rb2;
+			struct r10bio *rb2;
 			sector_t sect;
 			int must_sync;
 			int any_working;
@@ -2547,8 +2547,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 		if (biolist == NULL) {
 			while (r10_bio) {
-				r10bio_t *rb2 = r10_bio;
-				r10_bio = (r10bio_t*) rb2->master_bio;
+				struct r10bio *rb2 = r10_bio;
+				r10_bio = (struct r10bio*) rb2->master_bio;
 				rb2->master_bio = NULL;
 				put_buf(rb2);
 			}
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index c7721365f7bd..be7f8d9cfc63 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -13,8 +13,6 @@ struct mirror_info {
 						 */
 };
 
-typedef struct r10bio_s r10bio_t;
-
 struct r10_private_data_s {
 	struct mddev		*mddev;
 	mirror_info_t		*mirrors;
@@ -80,7 +78,7 @@ typedef struct r10_private_data_s conf_t;
  * for this RAID10 operation, and about their status:
  */
 
-struct r10bio_s {
+struct r10bio {
 	atomic_t		remaining; /* 'have we finished' count,
 					    * used from IRQ handlers
 					    */