author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-16 10:50:19 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-16 10:50:19 -0700
commit     e245befce7af0a1e1347079ed62695b059594bd4
tree       08270a503c8945b4e6ba142728dc289de2b55542
parent     14dc5249728ff699b1ca4dac01ad416a350a147a
parent     58ff411e0d21592565ac9ab34f33a434f26e018b
Merge branch 'bsg' of git://git.kernel.dk/data/git/linux-2.6-block
* 'bsg' of git://git.kernel.dk/data/git/linux-2.6-block: (25 commits)
  bsg: Kconfig updates
  bsg: add SCSI transport-level request support
  bsg: add bidi support
  add a struct request pointer to the request structure
  bsg: fix the deadlock on discarding done commands
  bsg: fix a blocking read bug
  bsg: minor bug fixes
  improve bsg device allocation
  bind bsg to all SCSI devices
  bsg: bind bsg to request_queue instead of gendisk
  bsg: add a request_queue argument to scsi_cmd_ioctl()
  bsg: simplify __bsg_alloc_command failpath
  bsg: add cheasy error checks for sysfs stuff
  Add queue resizing support
  Replace s32, u32 and u64 with __s32, __u32 and __u64 in bsg.h for userspace
  bsg: silence a bogus gcc warning
  bsg: style cleanup
  bsg: use u32 etc instead of uint32_t
  bsg: add SG_IO to SG v4
  bsg: replace SG v3 with SG v4
  ...
Diffstat:
 block/Kconfig            |    8
 block/Makefile           |    1
 block/bsg.c              | 1114
 block/ll_rw_blk.c        |    1
 block/scsi_ioctl.c       |  163
 drivers/block/ub.c       |    2
 drivers/cdrom/cdrom.c    |    3
 drivers/ide/ide-floppy.c |   29
 drivers/ide/ide.c        |   10
 drivers/scsi/sd.c        |    2
 drivers/scsi/st.c        |    3
 include/linux/blkdev.h   |   26
 include/linux/bsg.h      |   70
13 files changed, 1341 insertions(+), 91 deletions(-)
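
For orientation before the diff: userspace drives bsg through the new struct sg_io_v4 (added in include/linux/bsg.h below), either synchronously via the SG_IO ioctl that bsg_ioctl() handles, or asynchronously via write()/read() on the bsg node. A minimal sketch of the synchronous path, assuming a device node has been created for the bsg class device (the /dev/bsg0 path is hypothetical) and with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/bsg.h>
#include <scsi/sg.h>		/* SG_IO */

int main(void)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* marks a v4 header */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (unsigned long) cdb;
	hdr.request_len = sizeof(cdb);
	hdr.response = (unsigned long) sense;
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		return 1;

	printf("device_status 0x%x\n", hdr.device_status);
	close(fd);
	return 0;
}
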
diff --git a/block/Kconfig b/block/Kconfig
index 285935134bcd..1d16b08e1506 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -51,4 +51,12 @@ config LSF
 
 endif # BLOCK
 
+config BLK_DEV_BSG
+	bool "Block layer SG support"
+	depends on SCSI && EXPERIMENTAL
+	default y
+	---help---
+	Saying Y here will enable generic SG (SCSI generic) v4
+	support for any block device.
+
 source block/Kconfig.iosched
diff --git a/block/Makefile b/block/Makefile
index 4b84d0d5947b..959feeb253be 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_BLOCK) := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
 
+obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_AS)	+= as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
diff --git a/block/bsg.c b/block/bsg.c
new file mode 100644
index 000000000000..461c9f56f3ee
--- /dev/null
+++ b/block/bsg.c
@@ -0,0 +1,1114 @@
+/*
+ * bsg.c - block layer implementation of the sg v4 interface
+ *
+ * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
+ * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License version 2.  See the file "COPYING" in the main directory of this
+ *  archive for more details.
+ *
+ */
+/*
+ * TODO
+ *	- Should this get merged, block/scsi_ioctl.c will be migrated into
+ *	  this file. To keep maintenance down, it's easier to have them
+ *	  separated right now.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/percpu.h>
+#include <linux/uio.h>
+#include <linux/bsg.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/sg.h>
+
+static char bsg_version[] = "block layer sg (bsg) 0.4";
+
+struct bsg_device {
+	request_queue_t *queue;
+	spinlock_t lock;
+	struct list_head busy_list;
+	struct list_head done_list;
+	struct hlist_node dev_list;
+	atomic_t ref_count;
+	int minor;
+	int queued_cmds;
+	int done_cmds;
+	wait_queue_head_t wq_done;
+	wait_queue_head_t wq_free;
+	char name[BUS_ID_SIZE];
+	int max_queue;
+	unsigned long flags;
+};
+
+enum {
+	BSG_F_BLOCK		= 1,
+	BSG_F_WRITE_PERM	= 2,
+};
+
+#define BSG_DEFAULT_CMDS	64
+#define BSG_MAX_DEVS		32768
+
+#undef BSG_DEBUG
+
+#ifdef BSG_DEBUG
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#else
+#define dprintk(fmt, args...)
+#endif
+
+#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)
+
+/*
+ * just for testing
+ */
+#define BSG_MAJOR	(240)
+
+static DEFINE_MUTEX(bsg_mutex);
+static int bsg_device_nr, bsg_minor_idx;
+
+#define BSG_LIST_SIZE	(8)
+#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
+static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
+
+static struct class *bsg_class;
+static LIST_HEAD(bsg_class_list);
+
+static struct kmem_cache *bsg_cmd_cachep;
+
+/*
+ * our internal command type
+ */
+struct bsg_command {
+	struct bsg_device *bd;
+	struct list_head list;
+	struct request *rq;
+	struct bio *bio;
+	struct bio *bidi_bio;
+	int err;
+	struct sg_io_v4 hdr;
+	struct sg_io_v4 __user *uhdr;
+	char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static void bsg_free_command(struct bsg_command *bc)
+{
+	struct bsg_device *bd = bc->bd;
+	unsigned long flags;
+
+	kmem_cache_free(bsg_cmd_cachep, bc);
+
+	spin_lock_irqsave(&bd->lock, flags);
+	bd->queued_cmds--;
+	spin_unlock_irqrestore(&bd->lock, flags);
+
+	wake_up(&bd->wq_free);
+}
+
+static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
+{
+	struct bsg_command *bc = ERR_PTR(-EINVAL);
+
+	spin_lock_irq(&bd->lock);
+
+	if (bd->queued_cmds >= bd->max_queue)
+		goto out;
+
+	bd->queued_cmds++;
+	spin_unlock_irq(&bd->lock);
+
+	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
+	if (unlikely(!bc)) {
+		spin_lock_irq(&bd->lock);
+		bd->queued_cmds--;
+		bc = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	memset(bc, 0, sizeof(*bc));
+	bc->bd = bd;
+	INIT_LIST_HEAD(&bc->list);
+	dprintk("%s: returning free cmd %p\n", bd->name, bc);
+	return bc;
+out:
+	spin_unlock_irq(&bd->lock);
+	return bc;
+}
+
+static inline void
+bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
+{
+	bd->done_cmds--;
+	list_del(&bc->list);
+}
+
+static inline void
+bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
+{
+	bd->done_cmds++;
+	list_add_tail(&bc->list, &bd->done_list);
+	wake_up(&bd->wq_done);
+}
+
+static inline int bsg_io_schedule(struct bsg_device *bd, int state)
+{
+	DEFINE_WAIT(wait);
+	int ret = 0;
+
+	spin_lock_irq(&bd->lock);
+
+	BUG_ON(bd->done_cmds > bd->queued_cmds);
+
+	/*
+	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
+	 * work to do", even though we return -ENOSPC after this same test
+	 * during bsg_write() -- there, it means our buffer can't have more
+	 * bsg_commands added to it, thus has no space left.
+	 */
+	if (bd->done_cmds == bd->queued_cmds) {
+		ret = -ENODATA;
+		goto unlock;
+	}
+
+	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+		ret = -EAGAIN;
+		goto unlock;
+	}
+
+	prepare_to_wait(&bd->wq_done, &wait, state);
+	spin_unlock_irq(&bd->lock);
+	io_schedule();
+	finish_wait(&bd->wq_done, &wait);
+
+	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
+		ret = -ERESTARTSYS;
+
+	return ret;
+unlock:
+	spin_unlock_irq(&bd->lock);
+	return ret;
+}
+
+static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+				struct sg_io_v4 *hdr, int has_write_perm)
+{
+	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+
+	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
+			   hdr->request_len))
+		return -EFAULT;
+
+	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+		if (blk_verify_command(rq->cmd, has_write_perm))
+			return -EPERM;
+	} else if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	/*
+	 * fill in request structure
+	 */
+	rq->cmd_len = hdr->request_len;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+	rq->timeout = (hdr->timeout * HZ) / 1000;
+	if (!rq->timeout)
+		rq->timeout = q->sg_timeout;
+	if (!rq->timeout)
+		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+	return 0;
+}
+
+/*
+ * Check if sg_io_v4 from user is allowed and valid
+ */
+static int
+bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+{
+	int ret = 0;
+
+	if (hdr->guard != 'Q')
+		return -EINVAL;
+	if (hdr->request_len > BLK_MAX_CDB)
+		return -EINVAL;
+	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
+	    hdr->din_xfer_len > (q->max_sectors << 9))
+		return -EIO;
+
+	switch (hdr->protocol) {
+	case BSG_PROTOCOL_SCSI:
+		switch (hdr->subprotocol) {
+		case BSG_SUB_PROTOCOL_SCSI_CMD:
+		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	*rw = hdr->dout_xfer_len ? WRITE : READ;
+	return ret;
+}
+
+/*
+ * map sg_io_v4 to a request.
+ */
+static struct request *
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
+{
+	request_queue_t *q = bd->queue;
+	struct request *rq, *next_rq = NULL;
+	int ret, rw = 0; /* shut up gcc */
+	unsigned int dxfer_len;
+	void *dxferp = NULL;
+
+	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
+		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
+		hdr->din_xfer_len);
+
+	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
+	if (ret)
+		return ERR_PTR(ret);
+
+	/*
+	 * map scatter-gather elements separately and string them to request
+	 */
+	rq = blk_get_request(q, rw, GFP_KERNEL);
+	if (!rq)
+		return ERR_PTR(-ENOMEM);
+	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
+						       &bd->flags));
+	if (ret)
+		goto out;
+
+	if (rw == WRITE && hdr->din_xfer_len) {
+		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
+
+		next_rq = blk_get_request(q, READ, GFP_KERNEL);
+		if (!next_rq) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		rq->next_rq = next_rq;
+
+		dxferp = (void*)(unsigned long)hdr->din_xferp;
+		ret =  blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+		if (ret)
+			goto out;
+	}
+
+	if (hdr->dout_xfer_len) {
+		dxfer_len = hdr->dout_xfer_len;
+		dxferp = (void*)(unsigned long)hdr->dout_xferp;
+	} else if (hdr->din_xfer_len) {
+		dxfer_len = hdr->din_xfer_len;
+		dxferp = (void*)(unsigned long)hdr->din_xferp;
+	} else
+		dxfer_len = 0;
+
+	if (dxfer_len) {
+		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
+		if (ret)
+			goto out;
+	}
+	return rq;
+out:
+	blk_put_request(rq);
+	if (next_rq) {
+		blk_rq_unmap_user(next_rq->bio);
+		blk_put_request(next_rq);
+	}
+	return ERR_PTR(ret);
+}
+
+/*
+ * async completion call-back from the block layer, when scsi/ide/whatever
+ * calls end_that_request_last() on a request
+ */
+static void bsg_rq_end_io(struct request *rq, int uptodate)
+{
+	struct bsg_command *bc = rq->end_io_data;
+	struct bsg_device *bd = bc->bd;
+	unsigned long flags;
+
+	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
+		bd->name, rq, bc, bc->bio, uptodate);
+
+	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
+
+	spin_lock_irqsave(&bd->lock, flags);
+	list_del(&bc->list);
+	bsg_add_done_cmd(bd, bc);
+	spin_unlock_irqrestore(&bd->lock, flags);
+}
+
+/*
+ * do final setup of a 'bc' and submit the matching 'rq' to the block
+ * layer for io
+ */
+static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+			    struct bsg_command *bc, struct request *rq)
+{
+	rq->sense = bc->sense;
+	rq->sense_len = 0;
+
+	/*
+	 * add bc command to busy queue and submit rq for io
+	 */
+	bc->rq = rq;
+	bc->bio = rq->bio;
+	if (rq->next_rq)
+		bc->bidi_bio = rq->next_rq->bio;
+	bc->hdr.duration = jiffies;
+	spin_lock_irq(&bd->lock);
+	list_add_tail(&bc->list, &bd->busy_list);
+	spin_unlock_irq(&bd->lock);
+
+	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
+
+	rq->end_io_data = bc;
+	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
+}
+
+static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
+{
+	struct bsg_command *bc = NULL;
+
+	spin_lock_irq(&bd->lock);
+	if (bd->done_cmds) {
+		bc = list_entry_bc(bd->done_list.next);
+		bsg_del_done_cmd(bd, bc);
+	}
+	spin_unlock_irq(&bd->lock);
+
+	return bc;
+}
+
+/*
+ * Get a finished command from the done list
+ */
+static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
+{
+	struct bsg_command *bc;
+	int ret;
+
+	do {
+		bc = bsg_next_done_cmd(bd);
+		if (bc)
+			break;
+
+		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+			bc = ERR_PTR(-EAGAIN);
+			break;
+		}
+
+		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
+		if (ret) {
+			bc = ERR_PTR(-ERESTARTSYS);
+			break;
+		}
+	} while (1);
+
+	dprintk("%s: returning done %p\n", bd->name, bc);
+
+	return bc;
+}
+
+static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
+				    struct bio *bio, struct bio *bidi_bio)
+{
+	int ret = 0;
+
+	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
+	/*
+	 * fill in all the output members
+	 */
+	hdr->device_status = status_byte(rq->errors);
+	hdr->transport_status = host_byte(rq->errors);
+	hdr->driver_status = driver_byte(rq->errors);
+	hdr->info = 0;
+	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+		hdr->info |= SG_INFO_CHECK;
+	hdr->din_resid = rq->data_len;
+	hdr->response_len = 0;
+
+	if (rq->sense_len && hdr->response) {
+		int len = min((unsigned int) hdr->max_response_len,
+			      rq->sense_len);
+
+		ret = copy_to_user((void*)(unsigned long)hdr->response,
+				   rq->sense, len);
+		if (!ret)
+			hdr->response_len = len;
+		else
+			ret = -EFAULT;
+	}
+
+	if (rq->next_rq) {
+		blk_rq_unmap_user(bidi_bio);
+		blk_put_request(rq->next_rq);
+	}
+
+	blk_rq_unmap_user(bio);
+	blk_put_request(rq);
+
+	return ret;
+}
+
+static int bsg_complete_all_commands(struct bsg_device *bd)
+{
+	struct bsg_command *bc;
+	int ret, tret;
+
+	dprintk("%s: entered\n", bd->name);
+
+	set_bit(BSG_F_BLOCK, &bd->flags);
+
+	/*
+	 * wait for all commands to complete
+	 */
+	ret = 0;
+	do {
+		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
+		/*
+		 * look for -ENODATA specifically -- we'll sometimes get
+		 * -ERESTARTSYS when we've taken a signal, but we can't
+		 * return until we're done freeing the queue, so ignore
+		 * it.  The signal will get handled when we're done freeing
+		 * the bsg_device.
+		 */
+	} while (ret != -ENODATA);
+
+	/*
+	 * discard done commands
+	 */
+	ret = 0;
+	do {
+		spin_lock_irq(&bd->lock);
+		if (!bd->queued_cmds) {
+			spin_unlock_irq(&bd->lock);
+			break;
+		}
+		spin_unlock_irq(&bd->lock);
+
+		bc = bsg_get_done_cmd(bd);
+		if (IS_ERR(bc))
+			break;
+
+		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+						bc->bidi_bio);
+		if (!ret)
+			ret = tret;
+
+		bsg_free_command(bc);
+	} while (1);
+
+	return ret;
+}
+
+static ssize_t
+__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
+	   const struct iovec *iov, ssize_t *bytes_read)
+{
+	struct bsg_command *bc;
+	int nr_commands, ret;
+
+	if (count % sizeof(struct sg_io_v4))
+		return -EINVAL;
+
+	ret = 0;
+	nr_commands = count / sizeof(struct sg_io_v4);
+	while (nr_commands) {
+		bc = bsg_get_done_cmd(bd);
+		if (IS_ERR(bc)) {
+			ret = PTR_ERR(bc);
+			break;
+		}
+
+		/*
+		 * this is the only case where we need to copy data back
+		 * after completing the request. so do that here,
+		 * bsg_complete_work() cannot do that for us
+		 */
+		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+					       bc->bidi_bio);
+
+		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
+			ret = -EFAULT;
+
+		bsg_free_command(bc);
+
+		if (ret)
+			break;
+
+		buf += sizeof(struct sg_io_v4);
+		*bytes_read += sizeof(struct sg_io_v4);
+		nr_commands--;
+	}
+
+	return ret;
+}
+
+static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
+{
+	if (file->f_flags & O_NONBLOCK)
+		clear_bit(BSG_F_BLOCK, &bd->flags);
+	else
+		set_bit(BSG_F_BLOCK, &bd->flags);
+}
+
+static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
+{
+	if (file->f_mode & FMODE_WRITE)
+		set_bit(BSG_F_WRITE_PERM, &bd->flags);
+	else
+		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
+}
+
+static inline int err_block_err(int ret)
+{
+	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
+		return 1;
+
+	return 0;
+}
+
+static ssize_t
+bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct bsg_device *bd = file->private_data;
+	int ret;
+	ssize_t bytes_read;
+
+	dprintk("%s: read %Zd bytes\n", bd->name, count);
+
+	bsg_set_block(bd, file);
+	bytes_read = 0;
+	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
+	*ppos = bytes_read;
+
+	if (!bytes_read || (bytes_read && err_block_err(ret)))
+		bytes_read = ret;
+
+	return bytes_read;
+}
+
+static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
+			   size_t count, ssize_t *bytes_read)
+{
+	struct bsg_command *bc;
+	struct request *rq;
+	int ret, nr_commands;
+
+	if (count % sizeof(struct sg_io_v4))
+		return -EINVAL;
+
+	nr_commands = count / sizeof(struct sg_io_v4);
+	rq = NULL;
+	bc = NULL;
+	ret = 0;
+	while (nr_commands) {
+		request_queue_t *q = bd->queue;
+
+		bc = bsg_alloc_command(bd);
+		if (IS_ERR(bc)) {
+			ret = PTR_ERR(bc);
+			bc = NULL;
+			break;
+		}
+
+		bc->uhdr = (struct sg_io_v4 __user *) buf;
+		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		/*
+		 * get a request, fill in the blanks, and add to request queue
+		 */
+		rq = bsg_map_hdr(bd, &bc->hdr);
+		if (IS_ERR(rq)) {
+			ret = PTR_ERR(rq);
+			rq = NULL;
+			break;
+		}
+
+		bsg_add_command(bd, q, bc, rq);
+		bc = NULL;
+		rq = NULL;
+		nr_commands--;
+		buf += sizeof(struct sg_io_v4);
+		*bytes_read += sizeof(struct sg_io_v4);
+	}
+
+	if (bc)
+		bsg_free_command(bc);
+
+	return ret;
+}
+
+static ssize_t
+bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct bsg_device *bd = file->private_data;
+	ssize_t bytes_read;
+	int ret;
+
+	dprintk("%s: write %Zd bytes\n", bd->name, count);
+
+	bsg_set_block(bd, file);
+	bsg_set_write_perm(bd, file);
+
+	bytes_read = 0;
+	ret = __bsg_write(bd, buf, count, &bytes_read);
+	*ppos = bytes_read;
+
+	/*
+	 * return bytes written on non-fatal errors
+	 */
+	if (!bytes_read || (bytes_read && err_block_err(ret)))
+		bytes_read = ret;
+
+	dprintk("%s: returning %Zd\n", bd->name, bytes_read);
+	return bytes_read;
+}
+
+static struct bsg_device *bsg_alloc_device(void)
+{
+	struct bsg_device *bd;
+
+	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
+	if (unlikely(!bd))
+		return NULL;
+
+	spin_lock_init(&bd->lock);
+
+	bd->max_queue = BSG_DEFAULT_CMDS;
+
+	INIT_LIST_HEAD(&bd->busy_list);
+	INIT_LIST_HEAD(&bd->done_list);
+	INIT_HLIST_NODE(&bd->dev_list);
+
+	init_waitqueue_head(&bd->wq_free);
+	init_waitqueue_head(&bd->wq_done);
+	return bd;
+}
+
+static int bsg_put_device(struct bsg_device *bd)
+{
+	int ret = 0;
+
+	mutex_lock(&bsg_mutex);
+
+	if (!atomic_dec_and_test(&bd->ref_count))
+		goto out;
+
+	dprintk("%s: tearing down\n", bd->name);
+
+	/*
+	 * close can always block
+	 */
+	set_bit(BSG_F_BLOCK, &bd->flags);
+
+	/*
+	 * correct error detection baddies here again. it's the responsibility
+	 * of the app to properly reap commands before close() if it wants
+	 * fool-proof error detection
+	 */
+	ret = bsg_complete_all_commands(bd);
+
+	blk_put_queue(bd->queue);
+	hlist_del(&bd->dev_list);
+	kfree(bd);
+out:
+	mutex_unlock(&bsg_mutex);
+	return ret;
+}
+
+static struct bsg_device *bsg_add_device(struct inode *inode,
+					 struct request_queue *rq,
+					 struct file *file)
+{
+	struct bsg_device *bd = NULL;
+#ifdef BSG_DEBUG
+	unsigned char buf[32];
+#endif
+
+	bd = bsg_alloc_device();
+	if (!bd)
+		return ERR_PTR(-ENOMEM);
+
+	bd->queue = rq;
+	kobject_get(&rq->kobj);
+	bsg_set_block(bd, file);
+
+	atomic_set(&bd->ref_count, 1);
+	bd->minor = iminor(inode);
+	mutex_lock(&bsg_mutex);
+	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);
+
+	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
+	dprintk("bound to <%s>, max queue %d\n",
+		format_dev_t(buf, inode->i_rdev), bd->max_queue);
+
+	mutex_unlock(&bsg_mutex);
+	return bd;
+}
+
+static struct bsg_device *__bsg_get_device(int minor)
+{
+	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
+	struct bsg_device *bd = NULL;
+	struct hlist_node *entry;
+
+	mutex_lock(&bsg_mutex);
+
+	hlist_for_each(entry, list) {
+		bd = hlist_entry(entry, struct bsg_device, dev_list);
+		if (bd->minor == minor) {
+			atomic_inc(&bd->ref_count);
+			break;
+		}
+
+		bd = NULL;
+	}
+
+	mutex_unlock(&bsg_mutex);
+	return bd;
+}
+
+static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
+{
+	struct bsg_device *bd = __bsg_get_device(iminor(inode));
+	struct bsg_class_device *bcd, *__bcd;
+
+	if (bd)
+		return bd;
+
+	/*
+	 * find the class device
+	 */
+	bcd = NULL;
+	mutex_lock(&bsg_mutex);
+	list_for_each_entry(__bcd, &bsg_class_list, list) {
+		if (__bcd->minor == iminor(inode)) {
+			bcd = __bcd;
+			break;
+		}
+	}
+	mutex_unlock(&bsg_mutex);
+
+	if (!bcd)
+		return ERR_PTR(-ENODEV);
+
+	return bsg_add_device(inode, bcd->queue, file);
+}
+
+static int bsg_open(struct inode *inode, struct file *file)
+{
+	struct bsg_device *bd = bsg_get_device(inode, file);
+
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
+
+	file->private_data = bd;
+	return 0;
+}
+
+static int bsg_release(struct inode *inode, struct file *file)
+{
+	struct bsg_device *bd = file->private_data;
+
+	file->private_data = NULL;
+	return bsg_put_device(bd);
+}
+
+static unsigned int bsg_poll(struct file *file, poll_table *wait)
+{
+	struct bsg_device *bd = file->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(file, &bd->wq_done, wait);
+	poll_wait(file, &bd->wq_free, wait);
+
+	spin_lock_irq(&bd->lock);
+	if (!list_empty(&bd->done_list))
+		mask |= POLLIN | POLLRDNORM;
+	if (bd->queued_cmds >= bd->max_queue)
+		mask |= POLLOUT;
+	spin_unlock_irq(&bd->lock);
+
+	return mask;
+}
+
+static int
+bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+	  unsigned long arg)
+{
+	struct bsg_device *bd = file->private_data;
+	int __user *uarg = (int __user *) arg;
+
+	if (!bd)
+		return -ENXIO;
+
+	switch (cmd) {
+		/*
+		 * our own ioctls
+		 */
+	case SG_GET_COMMAND_Q:
+		return put_user(bd->max_queue, uarg);
+	case SG_SET_COMMAND_Q: {
+		int queue;
+
+		if (get_user(queue, uarg))
+			return -EFAULT;
+		if (queue < 1)
+			return -EINVAL;
+
+		spin_lock_irq(&bd->lock);
+		bd->max_queue = queue;
+		spin_unlock_irq(&bd->lock);
+		return 0;
+	}
+
+	/*
+	 * SCSI/sg ioctls
+	 */
+	case SG_GET_VERSION_NUM:
+	case SCSI_IOCTL_GET_IDLUN:
+	case SCSI_IOCTL_GET_BUS_NUMBER:
+	case SG_SET_TIMEOUT:
+	case SG_GET_TIMEOUT:
+	case SG_GET_RESERVED_SIZE:
+	case SG_SET_RESERVED_SIZE:
+	case SG_EMULATED_HOST:
+	case SCSI_IOCTL_SEND_COMMAND: {
+		void __user *uarg = (void __user *) arg;
+		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
+	}
+	case SG_IO: {
+		struct request *rq;
+		struct bio *bio, *bidi_bio = NULL;
+		struct sg_io_v4 hdr;
+
+		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
+			return -EFAULT;
+
+		rq = bsg_map_hdr(bd, &hdr);
+		if (IS_ERR(rq))
+			return PTR_ERR(rq);
+
+		bio = rq->bio;
+		if (rq->next_rq)
+			bidi_bio = rq->next_rq->bio;
+		blk_execute_rq(bd->queue, NULL, rq, 0);
+		blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
+
+		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
+			return -EFAULT;
+
+		return 0;
+	}
+	/*
+	 * block device ioctls
+	 */
+	default:
+#if 0
+		return ioctl_by_bdev(bd->bdev, cmd, arg);
+#else
+		return -ENOTTY;
+#endif
+	}
+}
+
+static struct file_operations bsg_fops = {
+	.read		=	bsg_read,
+	.write		=	bsg_write,
+	.poll		=	bsg_poll,
+	.open		=	bsg_open,
+	.release	=	bsg_release,
+	.ioctl		=	bsg_ioctl,
+	.owner		=	THIS_MODULE,
+};
+
+void bsg_unregister_queue(struct request_queue *q)
+{
+	struct bsg_class_device *bcd = &q->bsg_dev;
+
+	if (!bcd->class_dev)
+		return;
+
+	mutex_lock(&bsg_mutex);
+	sysfs_remove_link(&q->kobj, "bsg");
+	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
+	bcd->class_dev = NULL;
+	list_del_init(&bcd->list);
+	bsg_device_nr--;
+	mutex_unlock(&bsg_mutex);
+}
+EXPORT_SYMBOL_GPL(bsg_unregister_queue);
+
+int bsg_register_queue(struct request_queue *q, const char *name)
+{
+	struct bsg_class_device *bcd, *__bcd;
+	dev_t dev;
+	int ret = -EMFILE;
+	struct class_device *class_dev = NULL;
+
+	/*
+	 * we need a proper transport to send commands, not a stacked device
+	 */
+	if (!q->request_fn)
+		return 0;
+
+	bcd = &q->bsg_dev;
+	memset(bcd, 0, sizeof(*bcd));
+	INIT_LIST_HEAD(&bcd->list);
+
+	mutex_lock(&bsg_mutex);
+	if (bsg_device_nr == BSG_MAX_DEVS) {
+		printk(KERN_ERR "bsg: too many bsg devices\n");
+		goto err;
+	}
+
+retry:
+	list_for_each_entry(__bcd, &bsg_class_list, list) {
+		if (__bcd->minor == bsg_minor_idx) {
+			bsg_minor_idx++;
+			if (bsg_minor_idx == BSG_MAX_DEVS)
+				bsg_minor_idx = 0;
+			goto retry;
+		}
+	}
+
+	bcd->minor = bsg_minor_idx++;
+	if (bsg_minor_idx == BSG_MAX_DEVS)
+		bsg_minor_idx = 0;
+
+	bcd->queue = q;
+	dev = MKDEV(BSG_MAJOR, bcd->minor);
+	class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
+	if (IS_ERR(class_dev)) {
+		ret = PTR_ERR(class_dev);
+		goto err;
+	}
+	bcd->class_dev = class_dev;
+
+	if (q->kobj.dentry) {
+		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+		if (ret)
+			goto err;
+	}
+
+	list_add_tail(&bcd->list, &bsg_class_list);
+	bsg_device_nr++;
+
+	mutex_unlock(&bsg_mutex);
+	return 0;
+err:
+	if (class_dev)
+		class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
+	mutex_unlock(&bsg_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(bsg_register_queue);
+
+static int bsg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
+{
+	int ret;
+	struct scsi_device *sdp = to_scsi_device(cl_dev->dev);
+	struct request_queue *rq = sdp->request_queue;
+
+	if (rq->kobj.parent)
+		ret = bsg_register_queue(rq, kobject_name(rq->kobj.parent));
+	else
+		ret = bsg_register_queue(rq, kobject_name(&sdp->sdev_gendev.kobj));
+	return ret;
+}
+
+static void bsg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
+{
+	bsg_unregister_queue(to_scsi_device(cl_dev->dev)->request_queue);
+}
+
+static struct class_interface bsg_intf = {
+	.add	= bsg_add,
+	.remove	= bsg_remove,
+};
+
+static struct cdev bsg_cdev = {
+	.kobj   = {.name = "bsg", },
+	.owner  = THIS_MODULE,
+};
+
+static int __init bsg_init(void)
+{
+	int ret, i;
+
+	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
+				sizeof(struct bsg_command), 0, 0, NULL, NULL);
+	if (!bsg_cmd_cachep) {
+		printk(KERN_ERR "bsg: failed creating slab cache\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < BSG_LIST_SIZE; i++)
+		INIT_HLIST_HEAD(&bsg_device_list[i]);
+
+	bsg_class = class_create(THIS_MODULE, "bsg");
+	if (IS_ERR(bsg_class)) {
+		kmem_cache_destroy(bsg_cmd_cachep);
+		return PTR_ERR(bsg_class);
+	}
+
+	ret = register_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS, "bsg");
+	if (ret) {
+		kmem_cache_destroy(bsg_cmd_cachep);
+		class_destroy(bsg_class);
+		return ret;
+	}
+
+	cdev_init(&bsg_cdev, &bsg_fops);
+	ret = cdev_add(&bsg_cdev, MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
+	if (ret) {
+		kmem_cache_destroy(bsg_cmd_cachep);
+		class_destroy(bsg_class);
+		unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
+		return ret;
+	}
+
+	ret = scsi_register_interface(&bsg_intf);
+	if (ret) {
+		printk(KERN_ERR "bsg: failed to register scsi interface %d\n", ret);
+		kmem_cache_destroy(bsg_cmd_cachep);
+		class_destroy(bsg_class);
+		cdev_del(&bsg_cdev);
+		unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
+		return ret;
+	}
+
+	printk(KERN_INFO "%s loaded\n", bsg_version);
+	return 0;
+}
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_DESCRIPTION("Block layer SCSI generic (bsg) driver");
+MODULE_LICENSE("GPL");
+
+device_initcall(bsg_init);
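
bsg_register_queue()/bsg_unregister_queue() are the whole registration surface of the new driver; the SCSI class interface at the bottom of the file wires every scsi_device up automatically. A hedged sketch of what a transport with its own request_queue would do by hand (the my_transport_* names are illustrative, not part of this patch):

#include <linux/blkdev.h>
#include <linux/bsg.h>

static int my_transport_attach(struct request_queue *q, const char *name)
{
	/* creates the bsg class device and minor; returns -EMFILE once
	 * BSG_MAX_DEVS minors are in use, and 0 for stacked queues
	 * that lack a request_fn */
	return bsg_register_queue(q, name);
}

static void my_transport_detach(struct request_queue *q)
{
	bsg_unregister_queue(q);
}
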
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ef42bb2b12b6..11e4235d0b0c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -256,6 +256,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
 	rq->completion_data = NULL;
+	rq->next_rq = NULL;
 }
 
 /**
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e83f1dbf7c29..a26ba07955fe 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -41,8 +41,6 @@ const unsigned char scsi_command_size[8] =
 
 EXPORT_SYMBOL(scsi_command_size);
 
-#define BLK_DEFAULT_TIMEOUT	(60 * HZ)
-
 #include <scsi/sg.h>
 
 static int sg_get_version(int __user *p)
@@ -114,7 +112,7 @@ static int sg_emulated_host(request_queue_t *q, int __user *p)
 #define safe_for_read(cmd)	[cmd] = CMD_READ_SAFE
 #define safe_for_write(cmd)	[cmd] = CMD_WRITE_SAFE
 
-static int verify_command(struct file *file, unsigned char *cmd)
+int blk_verify_command(unsigned char *cmd, int has_write_perm)
 {
 	static unsigned char cmd_type[256] = {
 
@@ -193,18 +191,11 @@ static int verify_command(struct file *file, unsigned char *cmd)
 		safe_for_write(GPCMD_SET_STREAMING),
 	};
 	unsigned char type = cmd_type[cmd[0]];
-	int has_write_perm = 0;
 
 	/* Anybody who can open the device can do a read-safe command */
 	if (type & CMD_READ_SAFE)
 		return 0;
 
-	/*
-	 * file can be NULL from ioctl_by_bdev()...
-	 */
-	if (file)
-		has_write_perm = file->f_mode & FMODE_WRITE;
-
 	/* Write-safe commands just require a writable open.. */
 	if ((type & CMD_WRITE_SAFE) && has_write_perm)
 		return 0;
@@ -221,25 +212,96 @@ static int verify_command(struct file *file, unsigned char *cmd)
 	/* Otherwise fail it with an "Operation not permitted" */
 	return -EPERM;
 }
+EXPORT_SYMBOL_GPL(blk_verify_command);
+
+int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
+		      struct sg_io_hdr *hdr, int has_write_perm)
+{
+	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+
+	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+		return -EFAULT;
+	if (blk_verify_command(rq->cmd, has_write_perm))
+		return -EPERM;
+
+	/*
+	 * fill in request structure
+	 */
+	rq->cmd_len = hdr->cmd_len;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+	rq->timeout = (hdr->timeout * HZ) / 1000;
+	if (!rq->timeout)
+		rq->timeout = q->sg_timeout;
+	if (!rq->timeout)
+		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blk_fill_sghdr_rq);
+
+/*
+ * unmap a request that was previously mapped to this sg_io_hdr. handles
+ * both sg and non-sg sg_io_hdr.
+ */
+int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
+{
+	blk_rq_unmap_user(rq->bio);
+	blk_put_request(rq);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blk_unmap_sghdr_rq);
+
+int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
+			  struct bio *bio)
+{
+	int r, ret = 0;
+
+	/*
+	 * fill in all the output members
+	 */
+	hdr->status = rq->errors & 0xff;
+	hdr->masked_status = status_byte(rq->errors);
+	hdr->msg_status = msg_byte(rq->errors);
+	hdr->host_status = host_byte(rq->errors);
+	hdr->driver_status = driver_byte(rq->errors);
+	hdr->info = 0;
+	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+		hdr->info |= SG_INFO_CHECK;
+	hdr->resid = rq->data_len;
+	hdr->sb_len_wr = 0;
+
+	if (rq->sense_len && hdr->sbp) {
+		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
+
+		if (!copy_to_user(hdr->sbp, rq->sense, len))
+			hdr->sb_len_wr = len;
+		else
+			ret = -EFAULT;
+	}
+
+	rq->bio = bio;
+	r = blk_unmap_sghdr_rq(rq, hdr);
+	if (ret)
+		r = ret;
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(blk_complete_sghdr_rq);
 
 static int sg_io(struct file *file, request_queue_t *q,
 		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
-	unsigned long start_time, timeout;
-	int writing = 0, ret = 0;
+	unsigned long start_time;
+	int writing = 0, ret = 0, has_write_perm = 0;
 	struct request *rq;
 	char sense[SCSI_SENSE_BUFFERSIZE];
-	unsigned char cmd[BLK_MAX_CDB];
 	struct bio *bio;
 
 	if (hdr->interface_id != 'S')
 		return -EINVAL;
 	if (hdr->cmd_len > BLK_MAX_CDB)
 		return -EINVAL;
-	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
-		return -EFAULT;
-	if (verify_command(file, cmd))
-		return -EPERM;
 
 	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
 		return -EIO;
@@ -260,25 +322,13 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq)
 		return -ENOMEM;
 
-	/*
-	 * fill in request structure
-	 */
-	rq->cmd_len = hdr->cmd_len;
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-	memcpy(rq->cmd, cmd, hdr->cmd_len);
-
-	memset(sense, 0, sizeof(sense));
-	rq->sense = sense;
-	rq->sense_len = 0;
-
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	if (file)
+		has_write_perm = file->f_mode & FMODE_WRITE;
 
-	timeout = msecs_to_jiffies(hdr->timeout);
-	rq->timeout = (timeout < INT_MAX) ? timeout : INT_MAX;
-	if (!rq->timeout)
-		rq->timeout = q->sg_timeout;
-	if (!rq->timeout)
-		rq->timeout = BLK_DEFAULT_TIMEOUT;
+	if (blk_fill_sghdr_rq(q, rq, hdr, has_write_perm)) {
+		blk_put_request(rq);
+		return -EFAULT;
+	}
 
 	if (hdr->iovec_count) {
 		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
@@ -306,6 +356,9 @@ static int sg_io(struct file *file, request_queue_t *q,
 		goto out;
 
 	bio = rq->bio;
+	memset(sense, 0, sizeof(sense));
+	rq->sense = sense;
+	rq->sense_len = 0;
 	rq->retries = 0;
 
 	start_time = jiffies;
@@ -316,31 +369,9 @@ static int sg_io(struct file *file, request_queue_t *q,
 	 */
 	blk_execute_rq(q, bd_disk, rq, 0);
 
-	/* write to all output members */
-	hdr->status = 0xff & rq->errors;
-	hdr->masked_status = status_byte(rq->errors);
-	hdr->msg_status = msg_byte(rq->errors);
-	hdr->host_status = host_byte(rq->errors);
-	hdr->driver_status = driver_byte(rq->errors);
-	hdr->info = 0;
-	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
-		hdr->info |= SG_INFO_CHECK;
-	hdr->resid = rq->data_len;
 	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
-	hdr->sb_len_wr = 0;
-
-	if (rq->sense_len && hdr->sbp) {
-		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
-
-		if (!copy_to_user(hdr->sbp, rq->sense, len))
-			hdr->sb_len_wr = len;
-	}
 
-	if (blk_rq_unmap_user(bio))
-		ret = -EFAULT;
-
-	/* may not have succeeded, but output values written to control
-	 * structure (struct sg_io_hdr).  */
+	return blk_complete_sghdr_rq(rq, hdr, bio);
 out:
 	blk_put_request(rq);
 	return ret;
@@ -427,7 +458,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
 	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
 		goto error;
 
-	err = verify_command(file, rq->cmd);
+	err = blk_verify_command(rq->cmd, file->f_mode & FMODE_WRITE);
 	if (err)
 		goto error;
 
@@ -454,7 +485,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
 		rq->retries = 1;
 		break;
 	default:
-		rq->timeout = BLK_DEFAULT_TIMEOUT;
+		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 		break;
 	}
 
@@ -501,7 +532,7 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
 	rq->data_len = 0;
-	rq->timeout = BLK_DEFAULT_TIMEOUT;
+	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd[0] = cmd;
 	rq->cmd[4] = data;
@@ -517,16 +548,12 @@ static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_dis
 	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
 }
 
-int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
+int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
+		   struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
 {
-	request_queue_t *q;
 	int err;
 
-	q = bd_disk->queue;
-	if (!q)
-		return -ENXIO;
-
-	if (blk_get_queue(q))
+	if (!q || blk_get_queue(q))
 		return -ENXIO;
 
 	switch (cmd) {
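
With blk_verify_command(), blk_fill_sghdr_rq() and blk_complete_sghdr_rq() exported, a driver-private SG_IO path can reuse the same marshalling that sg_io() above now uses. A condensed sketch built only on those exports (my_sg_io and its caller glue are illustrative):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>	/* SCSI_SENSE_BUFFERSIZE */
#include <scsi/sg.h>		/* SG_DXFER_TO_DEV */

static int my_sg_io(struct file *file, request_queue_t *q,
		    struct gendisk *disk, struct sg_io_hdr *hdr)
{
	int writing = hdr->dxfer_direction == SG_DXFER_TO_DEV;
	int has_write_perm = file ? (file->f_mode & FMODE_WRITE) : 0;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/* copies the CDB in, permission-checks it, sets the timeout */
	ret = blk_fill_sghdr_rq(q, rq, hdr, has_write_perm);
	if (ret)
		goto out;

	if (hdr->dxfer_len) {
		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
		if (ret)
			goto out;
	}

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	bio = rq->bio;
	blk_execute_rq(q, disk, rq, 0);

	/* copies status and sense back, then unmaps and frees rq */
	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
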
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 18c8b6c0db20..8b13d7d2cb63 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1709,7 +1709,7 @@ static int ub_bd_ioctl(struct inode *inode, struct file *filp,
 	struct gendisk *disk = inode->i_bdev->bd_disk;
 	void __user *usermem = (void __user *) arg;
 
-	return scsi_cmd_ioctl(filp, disk, cmd, usermem);
+	return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem);
 }
 
 /*
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index aa5468f487ba..499019bf8f40 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2695,11 +2695,12 @@ int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
 {
 	void __user *argp = (void __user *)arg;
 	int ret;
+	struct gendisk *disk = ip->i_bdev->bd_disk;
 
 	/*
 	 * Try the generic SCSI command ioctl's first.
 	 */
-	ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, argp);
+	ret = scsi_cmd_ioctl(file, disk->queue, disk, cmd, argp);
 	if (ret != -ENOTTY)
 		return ret;
 
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index f429be88c4f9..a21f585b1caa 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -1258,19 +1258,25 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
 	set_bit(PC_DMA_RECOMMENDED, &pc->flags);
 }
 
-static int
+static void
 idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy, idefloppy_pc_t *pc, struct request *rq)
 {
-	/*
-	 * just support eject for now, it would not be hard to make the
-	 * REQ_BLOCK_PC support fully-featured
-	 */
-	if (rq->cmd[0] != IDEFLOPPY_START_STOP_CMD)
-		return 1;
-
 	idefloppy_init_pc(pc);
+	pc->callback = &idefloppy_rw_callback;
 	memcpy(pc->c, rq->cmd, sizeof(pc->c));
-	return 0;
+	pc->rq = rq;
+	pc->b_count = rq->data_len;
+	if (rq->data_len && rq_data_dir(rq) == WRITE)
+		set_bit(PC_WRITING, &pc->flags);
+	pc->buffer = rq->data;
+	if (rq->bio)
+		set_bit(PC_DMA_RECOMMENDED, &pc->flags);
+		
+	/*
+	 * possibly problematic, doesn't look like ide-floppy correctly
+	 * handled scattered requests if dma fails...
+	 */
+	pc->request_transfer = pc->buffer_size = rq->data_len;
 }
 
 /*
@@ -1317,10 +1323,7 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
 		pc = (idefloppy_pc_t *) rq->buffer;
 	} else if (blk_pc_request(rq)) {
 		pc = idefloppy_next_pc_storage(drive);
-		if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
-			idefloppy_do_end_request(drive, 0, 0);
-			return ide_stopped;
-		}
+		idefloppy_blockpc_cmd(floppy, pc, rq);
 	} else {
 		blk_dump_rq_flags(rq,
 			"ide-floppy: unsupported command in queue");
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index c948a5c17a5d..8cd7694593c9 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1049,9 +1049,13 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
 	unsigned long flags;
 	ide_driver_t *drv;
 	void __user *p = (void __user *)arg;
-	int err = 0, (*setfunc)(ide_drive_t *, int);
+	int err, (*setfunc)(ide_drive_t *, int);
 	u8 *val;
 
+	err = scsi_cmd_ioctl(file, bdev->bd_disk->queue, bdev->bd_disk, cmd, p);
+	if (err != -ENOTTY)
+		return err;
+
 	switch (cmd) {
 	case HDIO_GET_32BIT:	    val = &drive->io_32bit;	 goto read_val;
 	case HDIO_GET_KEEPSETTINGS: val = &drive->keep_settings; goto read_val;
@@ -1171,10 +1175,6 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
 			return 0;
 		}
 
-		case CDROMEJECT:
-		case CDROMCLOSETRAY:
-			return scsi_cmd_ioctl(file, bdev->bd_disk, cmd, p);
-
 		case HDIO_GET_BUSSTATE:
 			if (!capable(CAP_SYS_ADMIN))
 				return -EACCES;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 448d316f12d7..424d557284a9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -684,7 +684,7 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
 		case SCSI_IOCTL_GET_BUS_NUMBER:
 			return scsi_ioctl(sdp, cmd, p);
 		default:
-			error = scsi_cmd_ioctl(filp, disk, cmd, p);
+			error = scsi_cmd_ioctl(filp, disk->queue, disk, cmd, p);
 			if (error != -ENOTTY)
 				return error;
 	}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 55bfeccf68a2..a4f7b8465773 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3549,7 +3549,8 @@ static int st_ioctl(struct inode *inode, struct file *file,
 			    !capable(CAP_SYS_RAWIO))
 				i = -EPERM;
 			else
-				i = scsi_cmd_ioctl(file, STp->disk, cmd_in, p);
+				i = scsi_cmd_ioctl(file, STp->disk->queue,
+						   STp->disk, cmd_in, p);
 			if (i != -ENOTTY)
 				return i;
 			break;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fae138bd2207..b32564a1e105 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -14,6 +14,7 @@
 #include <linux/bio.h>
 #include <linux/module.h>
 #include <linux/stringify.h>
+#include <linux/bsg.h>
 
 #include <asm/scatterlist.h>
 
@@ -41,6 +42,8 @@ struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 struct blk_trace;
+struct request;
+struct sg_io_hdr;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -314,6 +317,9 @@ struct request {
 	 */
 	rq_end_io_fn *end_io;
 	void *end_io_data;
+
+	/* for bidi */
+	struct request *next_rq;
 };
 
 /*
@@ -468,6 +474,10 @@ struct request_queue
 	unsigned int		bi_size;
 
 	struct mutex		sysfs_lock;
+
+#if defined(CONFIG_BLK_DEV_BSG)
+	struct bsg_class_device bsg_dev;
+#endif
 };
 
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -479,6 +489,7 @@ struct request_queue
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
 
 enum {
 	/*
@@ -543,6 +554,7 @@ enum {
 #define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
+#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -607,6 +619,11 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
 #define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
 
+/*
+ * default timeout for SG_IO if none specified
+ */
+#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
+
 #ifdef CONFIG_MMU
 extern int init_emergency_isa_pool(void);
 extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
@@ -637,7 +654,8 @@ extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
 extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
+extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
+			  struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, struct scsi_ioctl_command __user *);
 
@@ -680,6 +698,12 @@ extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
+extern int blk_fill_sghdr_rq(request_queue_t *, struct request *,
+		      struct sg_io_hdr *, int);
+extern int blk_unmap_sghdr_rq(struct request *, struct sg_io_hdr *);
+extern int blk_complete_sghdr_rq(struct request *, struct sg_io_hdr *,
+			  struct bio *);
+extern int blk_verify_command(unsigned char *, int);
 
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
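
A queue has to advertise QUEUE_FLAG_BIDI before bsg_map_hdr() will build a request pair, and blk_bidi_rq() is the cheap test for such a pair afterwards. A small sketch under those definitions (my_* names illustrative):

#include <linux/blkdev.h>

/* opt a transport's queue in to bidirectional requests; without this
 * flag, bsg_map_hdr() fails dout+din headers with -EOPNOTSUPP */
static void my_transport_init_queue(struct request_queue *q)
{
	set_bit(QUEUE_FLAG_BIDI, &q->queue_flags);
}

/* at completion time, the READ half rides along as rq->next_rq */
static struct request *my_bidi_read_half(struct request *rq)
{
	return blk_bidi_rq(rq) ? rq->next_rq : NULL;
}
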
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
new file mode 100644
index 000000000000..bd998ca6cb2e
--- /dev/null
+++ b/include/linux/bsg.h
@@ -0,0 +1,70 @@
+#ifndef BSG_H
+#define BSG_H
+
+#define BSG_PROTOCOL_SCSI		0
+
+#define BSG_SUB_PROTOCOL_SCSI_CMD	0
+#define BSG_SUB_PROTOCOL_SCSI_TMF	1
+#define BSG_SUB_PROTOCOL_SCSI_TRANSPORT	2
+
+struct sg_io_v4 {
+	__s32 guard;		/* [i] 'Q' to differentiate from v3 */
+	__u32 protocol;		/* [i] 0 -> SCSI , .... */
+	__u32 subprotocol;	/* [i] 0 -> SCSI command, 1 -> SCSI task
+				   management function, .... */
+
+	__u32 request_len;	/* [i] in bytes */
+	__u64 request;		/* [i], [*i] {SCSI: cdb} */
+	__u32 request_attr;	/* [i] {SCSI: task attribute} */
+	__u32 request_tag;	/* [i] {SCSI: task tag (only if flagged)} */
+	__u32 request_priority;	/* [i] {SCSI: task priority} */
+	__u32 max_response_len;	/* [i] in bytes */
+	__u64 response;		/* [i], [*o] {SCSI: (auto)sense data} */
+
+	/* "din_" for data in (from device); "dout_" for data out (to device) */
+	__u32 dout_xfer_len;	/* [i] bytes to be transferred to device */
+	__u32 din_xfer_len;	/* [i] bytes to be transferred from device */
+	__u64 dout_xferp;	/* [i], [*i] */
+	__u64 din_xferp;	/* [i], [*o] */
+
+	__u32 timeout;		/* [i] units: millisecond */
+	__u32 flags;		/* [i] bit mask */
+	__u64 usr_ptr;		/* [i->o] unused internally */
+	__u32 spare_in;		/* [i] */
+
+	__u32 driver_status;	/* [o] 0 -> ok */
+	__u32 transport_status;	/* [o] 0 -> ok */
+	__u32 device_status;	/* [o] {SCSI: command completion status} */
+	__u32 retry_delay;	/* [o] {SCSI: status auxiliary information} */
+	__u32 info;		/* [o] additional information */
+	__u32 duration;		/* [o] time to complete, in milliseconds */
+	__u32 response_len;	/* [o] bytes of response actually written */
+	__s32 din_resid;	/* [o] din_xfer_len - actual_din_xfer_len */
+	__u32 generated_tag;	/* [o] {SCSI: task tag that transport chose} */
+	__u32 spare_out;	/* [o] */
+
+	__u32 padding;
+};
+
+#ifdef __KERNEL__
+
+#if defined(CONFIG_BLK_DEV_BSG)
+struct bsg_class_device {
+	struct class_device *class_dev;
+	struct device *dev;
+	int minor;
+	struct list_head list;
+	struct request_queue *queue;
+};
+
+extern int bsg_register_queue(struct request_queue *, const char *);
+extern void bsg_unregister_queue(struct request_queue *);
+#else
+struct bsg_class_device { };
+#define bsg_register_queue(disk, name)		(0)
+#define bsg_unregister_queue(disk)	do { } while (0)
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif
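
Because bsg_write() accepts any whole multiple of sizeof(struct sg_io_v4) and bsg_read() reaps completions in done order, an application can keep up to max_queue commands (SG_SET_COMMAND_Q, default 64) in flight per open descriptor. A sketch of that queued mode under the same assumptions as the earlier SG_IO example (hypothetical /dev/bsg0, minimal error handling):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/bsg.h>

int main(void)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (unsigned long) cdb;
	hdr.request_len = sizeof(cdb);
	hdr.response = (unsigned long) sense;
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 5000;
	hdr.usr_ptr = 0xbeef;		/* passed through untouched */

	/* submit: must be a whole number of sg_io_v4 headers */
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	/* reap: blocks for a completion unless O_NONBLOCK is set */
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	printf("usr_ptr %llx device_status 0x%x\n",
	       (unsigned long long) hdr.usr_ptr, hdr.device_status);
	close(fd);
	return 0;
}
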