author    Nicholas Bellinger <nab@linux-iscsi.org>    2011-11-30 18:18:33 -0800
committer Nicholas Bellinger <nab@linux-iscsi.org>    2011-12-14 11:48:46 +0000
commit    4d2300ccffd22d1d0213b6a8e4d685eb6ca069c0 (patch)
tree      a76817aa4aaba5a6ffb05b3e6a0ea9ca654a0519
parent    65586d51e0986be574118286c3d0007e903a2add (diff)
download  linux-4d2300ccffd22d1d0213b6a8e4d685eb6ca069c0.tar.gz
target: Remove extra se_device->execute_task_lock access in fast path
This patch makes __transport_execute_tasks() perform the addition of
tasks to dev->execute_task_list via __transport_add_tasks_from_cmd()
while holding dev->execute_task_lock during normal I/O fast path
submission.

It effectively removes the unnecessary re-acquisition of dev->execute_task_lock
during transport_execute_tasks() -> transport_add_tasks_from_cmd() ahead
of calling __transport_execute_tasks() to queue tasks for the passed
*se_cmd descriptor.

(v2: Re-add goto check_depth usage for multi-task submission for now.)

Cc: Christoph Hellwig <hch@lst.de>
Cc: Roland Dreier <roland@purestorage.com>
Cc: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--  drivers/target/target_core_transport.c | 40
-rw-r--r--  include/target/target_core_base.h      |  1
2 files changed, 24 insertions(+), 17 deletions(-)
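
The change collapses two acquisitions of dev->execute_task_lock per fast-path
submission into one: rather than transport_add_tasks_from_cmd() taking and
dropping the lock and __transport_execute_tasks() immediately re-taking it,
the command's tasks are now added inside the same critical section that
dispatches them. Below is a minimal user-space sketch of that before/after
pattern using pthread mutexes; struct device, struct command, and the helper
names are illustrative only, not the kernel's.

#include <pthread.h>

/*
 * Simplified user-space analogue of the locking change in this patch.
 * List handling is elided; all names are illustrative.
 */

struct device {
	pthread_mutex_t execute_task_lock;
	/* ... execute_task_list ... */
};

struct command {
	struct device *dev;
	/* ... t_task_list ... */
};

/* caller must hold dev->execute_task_lock */
static void __add_tasks_from_cmd(struct command *cmd)
{
	/* splice cmd's tasks onto the device's execute list */
	(void)cmd;
}

/* caller must hold dev->execute_task_lock */
static void __dispatch_head_task(struct device *dev)
{
	/* pick the head of the execute list for dispatch */
	(void)dev;
}

/* Before: the lock is taken, dropped, and immediately re-taken. */
static void submit_before(struct command *cmd)
{
	struct device *dev = cmd->dev;

	pthread_mutex_lock(&dev->execute_task_lock);
	__add_tasks_from_cmd(cmd);
	pthread_mutex_unlock(&dev->execute_task_lock);

	pthread_mutex_lock(&dev->execute_task_lock);	/* second acquisition */
	__dispatch_head_task(dev);
	pthread_mutex_unlock(&dev->execute_task_lock);
}

/* After: one critical section covers both the add and the dispatch. */
static void submit_after(struct command *cmd)
{
	struct device *dev = cmd->dev;

	pthread_mutex_lock(&dev->execute_task_lock);
	__add_tasks_from_cmd(cmd);
	__dispatch_head_task(dev);
	pthread_mutex_unlock(&dev->execute_task_lock);
}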
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7c2def7e2593..46e25118ab25 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -68,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
 static int transport_generic_write_pending(struct se_cmd *);
 static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
@@ -851,13 +851,11 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_task *task, *task_prev = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		if (!list_empty(&task->t_execute_list))
 			continue;
@@ -868,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
 		__transport_add_task_to_execute_queue(task, task_prev, dev);
 		task_prev = task;
 	}
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	struct se_device *dev = cmd->se_dev;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	__transport_add_tasks_from_cmd(cmd);
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
@@ -2075,19 +2082,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 		if (!add_tasks)
 			goto execute_tasks;
 		/*
-		 * This calls transport_add_tasks_from_cmd() to handle
-		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
-		 * (if enabled) in __transport_add_task_to_execute_queue() and
-		 * transport_add_task_check_sam_attr().
+		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+		 * adds associated se_tasks while holding dev->execute_task_lock
+		 * before I/O dispatch to avoid a double spinlock access.
 		 */
-		transport_add_tasks_from_cmd(cmd);
+		__transport_execute_tasks(se_dev, cmd);
+		return 0;
 	}
-	/*
-	 * Kick the execution queue for the cmd associated struct se_device
-	 * storage object.
-	 */
+
 execute_tasks:
-	__transport_execute_tasks(se_dev);
+	__transport_execute_tasks(se_dev, NULL);
 	return 0;
 }
 
@@ -2097,7 +2101,7 @@ execute_tasks:
  *
  * Called from transport_processing_thread()
  */
-static int __transport_execute_tasks(struct se_device *dev)
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
 {
 	int error;
 	struct se_cmd *cmd = NULL;
@@ -2106,6 +2110,9 @@ static int __transport_execute_tasks(struct se_device *dev)
 
 check_depth:
 	spin_lock_irq(&dev->execute_task_lock);
+	if (new_cmd != NULL)
+		__transport_add_tasks_from_cmd(new_cmd);
+
 	if (list_empty(&dev->execute_task_list)) {
 		spin_unlock_irq(&dev->execute_task_lock);
 		return 0;
@@ -2139,6 +2146,7 @@ check_depth:
 		transport_generic_request_failure(cmd);
 	}
 
+	new_cmd = NULL;
 	goto check_depth;
 
 	return 0;
@@ -4647,7 +4655,7 @@ static int transport_processing_thread(void *param)
 			goto out;
 
 get_cmd:
-		__transport_execute_tasks(dev);
+		__transport_execute_tasks(dev, NULL);
 
 		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
 		if (!cmd)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index cd4caf3a598f..a74cb2be74de 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -781,7 +781,6 @@ struct se_device {
 	u32			dev_port_count;
 	/* See transport_device_status_table */
 	u32			dev_status;
-	u32			dev_tcq_window_closed;
 	/* Physical device queue depth */
 	u32			queue_depth;
 	/* Used for SPC-2 reservations enforce of ISIDs */