author    Mauro Carvalho Chehab <m.chehab@samsung.com>	2014-03-11 06:55:49 -0300
committer Mauro Carvalho Chehab <m.chehab@samsung.com>	2014-03-11 06:55:49 -0300
commit    c897df0e2dbc81bcc09c11425658d69830825364 (patch)
tree      04c268e4951f2b75acb6873307673e45d05a635e /drivers/scsi
parent    1e9c4d49020996a645a535cbb8f1ff78b9b120f3 (diff)
parent    0414855fdc4a40da05221fc6062cccbc0c30f169 (diff)
Merge tag 'v3.14-rc5' into patchwork
Linux 3.14-rc5

* tag 'v3.14-rc5': (1117 commits)
  Linux 3.14-rc5
  drm/vmwgfx: avoid null pointer dereference at failure paths
  drm/vmwgfx: Make sure backing mobs are cleared when allocated. Update driver date.
  drm/vmwgfx: Remove some unused surface formats
  MAINTAINERS: add maintainer entry for Armada DRM driver
  arm64: Fix !CONFIG_SMP kernel build
  arm64: mm: Add double logical invert to pte accessors
  dm cache: fix truncation bug when mapping I/O to >2TB fast device
  perf tools: Fix strict alias issue for find_first_bit
  powerpc/powernv: Fix indirect XSCOM unmangling
  powerpc/powernv: Fix opal_xscom_{read,write} prototype
  powerpc/powernv: Refactor PHB diag-data dump
  powerpc/powernv: Dump PHB diag-data immediately
  powerpc: Increase stack redzone for 64-bit userspace to 512 bytes
  powerpc/ftrace: bugfix for test_24bit_addr
  powerpc/crashdump : Fix page frame number check in copy_oldmem_page
  powerpc/le: Ensure that the 'stop-self' RTAS token is handled correctly
  kvm, vmx: Really fix lazy FPU on nested guest
  perf tools: fix BFD detection on opensuse
  drm/radeon: enable speaker allocation setup on dce3.2
  ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c  |  44
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h  |   3
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 158
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h |   7
-rw-r--r--  drivers/scsi/scsi_lib.c            |   2
5 files changed, 156 insertions(+), 58 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 9e80d61e5a3a..0cb73074c199 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -790,17 +790,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
 }
 
 /* Called by tcm_qla2xxx configfs code */
-void qlt_stop_phase1(struct qla_tgt *tgt)
+int qlt_stop_phase1(struct qla_tgt *tgt)
 {
 	struct scsi_qla_host *vha = tgt->vha;
 	struct qla_hw_data *ha = tgt->ha;
 	unsigned long flags;
 
+	mutex_lock(&qla_tgt_mutex);
+	if (!vha->fc_vport) {
+		struct Scsi_Host *sh = vha->host;
+		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+		bool npiv_vports;
+
+		spin_lock_irqsave(sh->host_lock, flags);
+		npiv_vports = (fc_host->npiv_vports_inuse);
+		spin_unlock_irqrestore(sh->host_lock, flags);
+
+		if (npiv_vports) {
+			mutex_unlock(&qla_tgt_mutex);
+			return -EPERM;
+		}
+	}
 	if (tgt->tgt_stop || tgt->tgt_stopped) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
 		    "Already in tgt->tgt_stop or tgt_stopped state\n");
-		dump_stack();
-		return;
+		mutex_unlock(&qla_tgt_mutex);
+		return -EPERM;
 	}
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
@@ -815,6 +830,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
 	qlt_clear_tgt_db(tgt, true);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+	mutex_unlock(&qla_tgt_mutex);
 
 	flush_delayed_work(&tgt->sess_del_work);
 
@@ -841,6 +857,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
 
 	/* Wait for sessions to clear out (just in case) */
 	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+	return 0;
 }
 EXPORT_SYMBOL(qlt_stop_phase1);
 
@@ -2595,8 +2612,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 		return -ENOMEM;
 	}
 
-	INIT_LIST_HEAD(&cmd->cmd_list);
-
 	memcpy(&cmd->atio, atio, sizeof(*atio));
 	cmd->state = QLA_TGT_STATE_NEW;
 	cmd->tgt = vha->vha_tgt.qla_tgt;
@@ -3187,7 +3202,8 @@ restart:
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
 		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
 		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
-		    se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+		    cmd->sg_cnt, cmd->offset);
 
 		qlt_handle_srr(vha, sctio, imm);
 
@@ -4183,6 +4199,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
 	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
 
+	if (base_vha->fc_vport)
+		return 0;
+
 	mutex_lock(&qla_tgt_mutex);
 	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
 	mutex_unlock(&qla_tgt_mutex);
@@ -4196,6 +4215,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
 	if (!vha->vha_tgt.qla_tgt)
 		return 0;
 
+	if (vha->fc_vport) {
+		qlt_release(vha->vha_tgt.qla_tgt);
+		return 0;
+	}
 	mutex_lock(&qla_tgt_mutex);
 	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
 	mutex_unlock(&qla_tgt_mutex);
@@ -4267,6 +4290,12 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
 			continue;
 		}
+		if (tgt->tgt_stop) {
+			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
+				 host->host_no);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			continue;
+		}
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 		if (!scsi_host_get(host)) {
@@ -4281,12 +4310,11 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
 			scsi_host_put(host);
 			continue;
 		}
-		mutex_unlock(&qla_tgt_mutex);
-
 		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
 		if (rc != 0)
 			scsi_host_put(host);
 
+		mutex_unlock(&qla_tgt_mutex);
 		return rc;
 	}
 	mutex_unlock(&qla_tgt_mutex);
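Note: the qla_target.c hunks above convert qlt_stop_phase1() from void to int. Under qla_tgt_mutex, the stop is now refused with -EPERM when the base port still has NPIV vports in use (fc_host->npiv_vports_inuse) or when a stop is already in progress, and 0 is returned only after the target has actually been brought down. The following is a minimal sketch of the resulting caller contract; the function name and the "enabled" flag are hypothetical, used only to illustrate how a caller is expected to react to the new return code.

/*
 * Illustrative caller (not part of the patch): only clear local "enabled"
 * state once qlt_stop_phase1() has actually accepted the shutdown request.
 */
static int example_disable_tgt(struct scsi_qla_host *vha, atomic_t *enabled)
{
	int rc;

	rc = qlt_stop_phase1(vha->vha_tgt.qla_tgt);
	if (rc)
		return rc;	/* NPIV vports in use, or already stopping */

	atomic_set(enabled, 0);
	return 0;
}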
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 1d10eecad499..ce33d8c26406 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -855,7 +855,6 @@ struct qla_tgt_cmd {
 	uint16_t loop_id;	/* to save extra sess dereferences */
 	struct qla_tgt *tgt;	/* to save extra sess dereferences */
 	struct scsi_qla_host *vha;
-	struct list_head cmd_list;
 
 	struct atio_from_isp atio;
 };
@@ -1002,7 +1001,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *,
 extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
 extern int qlt_mem_alloc(struct qla_hw_data *);
 extern void qlt_mem_free(struct qla_hw_data *);
-extern void qlt_stop_phase1(struct qla_tgt *);
+extern int qlt_stop_phase1(struct qla_tgt *);
 extern void qlt_stop_phase2(struct qla_tgt *);
 extern irqreturn_t qla83xx_msix_atio_q(int, void *);
 extern void qlt_83xx_iospace_config(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 75a141bbe74d..788c4fe2b0c9 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -182,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(
 	return 0;
 }
 
-static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
-					u64 wwpn, u64 wwnn)
-{
-	u8 b[8], b2[8];
-
-	put_unaligned_be64(wwpn, b);
-	put_unaligned_be64(wwnn, b2);
-	return snprintf(buf, len,
-		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
-		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
-		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
-		b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
-}
-
 static char *tcm_qla2xxx_npiv_get_fabric_name(void)
 {
 	return "qla2xxx_npiv";
@@ -227,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
 	return lport->lport_naa_name;
 }
 
-static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
-{
-	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
-				struct tcm_qla2xxx_tpg, se_tpg);
-	struct tcm_qla2xxx_lport *lport = tpg->lport;
-
-	return &lport->lport_npiv_name[0];
-}
-
 static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
 {
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -941,15 +918,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable(
 			atomic_read(&tpg->lport_tpg_enabled));
 }
 
+static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
+{
+	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+				struct tcm_qla2xxx_tpg, tpg_base_work);
+	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+	if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+				  &se_tpg->tpg_group.cg_item)) {
+		atomic_set(&base_tpg->lport_tpg_enabled, 1);
+		qlt_enable_vha(base_vha);
+	}
+	complete(&base_tpg->tpg_base_comp);
+}
+
+static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
+{
+	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+				struct tcm_qla2xxx_tpg, tpg_base_work);
+	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+	if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
+		atomic_set(&base_tpg->lport_tpg_enabled, 0);
+		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+				       &se_tpg->tpg_group.cg_item);
+	}
+	complete(&base_tpg->tpg_base_comp);
+}
+
 static ssize_t tcm_qla2xxx_tpg_store_enable(
 	struct se_portal_group *se_tpg,
 	const char *page,
 	size_t count)
 {
-	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
-	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
-			struct tcm_qla2xxx_lport, lport_wwn);
-	struct scsi_qla_host *vha = lport->qla_vha;
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 			struct tcm_qla2xxx_tpg, se_tpg);
 	unsigned long op;
@@ -964,19 +967,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
 		pr_err("Illegal value for tpg_enable: %lu\n", op);
 		return -EINVAL;
 	}
-
 	if (op) {
-		atomic_set(&tpg->lport_tpg_enabled, 1);
-		qlt_enable_vha(vha);
+		if (atomic_read(&tpg->lport_tpg_enabled))
+			return -EEXIST;
+
+		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
 	} else {
-		if (!vha->vha_tgt.qla_tgt) {
-			pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
-			return -ENODEV;
-		}
-		atomic_set(&tpg->lport_tpg_enabled, 0);
-		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+		if (!atomic_read(&tpg->lport_tpg_enabled))
+			return count;
+
+		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
 	}
+	init_completion(&tpg->tpg_base_comp);
+	schedule_work(&tpg->tpg_base_work);
+	wait_for_completion(&tpg->tpg_base_comp);
 
+	if (op) {
+		if (!atomic_read(&tpg->lport_tpg_enabled))
+			return -ENODEV;
+	} else {
+		if (atomic_read(&tpg->lport_tpg_enabled))
+			return -EPERM;
+	}
 	return count;
 }
 
@@ -1053,11 +1065,64 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
 	/*
 	 * Clear local TPG=1 pointer for non NPIV mode.
 	 */
-		lport->tpg_1 = NULL;
-
+	lport->tpg_1 = NULL;
 	kfree(tpg);
 }
 
+static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+	unsigned long op;
+	int rc;
+
+	rc = kstrtoul(page, 0, &op);
+	if (rc < 0) {
+		pr_err("kstrtoul() returned %d\n", rc);
+		return -EINVAL;
+	}
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %lu\n", op);
+		return -EINVAL;
+	}
+	if (op) {
+		if (atomic_read(&tpg->lport_tpg_enabled))
+			return -EEXIST;
+
+		atomic_set(&tpg->lport_tpg_enabled, 1);
+		qlt_enable_vha(vha);
+	} else {
+		if (!atomic_read(&tpg->lport_tpg_enabled))
+			return count;
+
+		atomic_set(&tpg->lport_tpg_enabled, 0);
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+	}
+
+	return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
+        &tcm_qla2xxx_npiv_tpg_enable.attr,
+        NULL,
+};
+
 static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
 	struct se_wwn *wwn,
 	struct config_group *group,
@@ -1650,6 +1715,9 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
 	struct scsi_qla_host *npiv_vha;
 	struct tcm_qla2xxx_lport *lport =
 			(struct tcm_qla2xxx_lport *)target_lport_ptr;
+	struct tcm_qla2xxx_lport *base_lport =
+			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
+	struct tcm_qla2xxx_tpg *base_tpg;
 	struct fc_vport_identifiers vport_id;
 
 	if (!qla_tgt_mode_enabled(base_vha)) {
@@ -1657,6 +1725,13 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
 		return -EPERM;
 	}
 
+	if (!base_lport || !base_lport->tpg_1 ||
+	    !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
+		pr_err("qla2xxx base_lport or tpg_1 not available\n");
+		return -EPERM;
+	}
+	base_tpg = base_lport->tpg_1;
+
 	memset(&vport_id, 0, sizeof(vport_id));
 	vport_id.port_name = npiv_wwpn;
 	vport_id.node_name = npiv_wwnn;
@@ -1675,7 +1750,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
 	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
 	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
 	lport->qla_vha = npiv_vha;
-
 	scsi_host_get(npiv_vha->host);
 	return 0;
 }
@@ -1714,8 +1788,6 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
 	}
 	lport->lport_npiv_wwpn = npiv_wwpn;
 	lport->lport_npiv_wwnn = npiv_wwnn;
-	tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
-			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
 	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
 
 	ret = tcm_qla2xxx_init_lport(lport);
@@ -1824,7 +1896,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
 static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
 	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
 	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident,
-	.tpg_get_wwn			= tcm_qla2xxx_npiv_get_fabric_wwn,
+	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
 	.tpg_get_tag			= tcm_qla2xxx_get_tag,
 	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,
 	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,
@@ -1935,7 +2007,7 @@ static int tcm_qla2xxx_register_configfs(void)
 	 */
 	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
-	    tcm_qla2xxx_tpg_attrs;
+	    tcm_qla2xxx_npiv_tpg_attrs;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
 	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
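Note: the tcm_qla2xxx.c hunks above move the base-TPG enable/disable work out of the configfs ->store() path. tcm_qla2xxx_tpg_store_enable() now only queues tcm_qla2xxx_depend_tpg() or tcm_qla2xxx_undepend_tpg() on the system workqueue and blocks on tpg_base_comp, so configfs_depend_item()/configfs_undepend_item() run from workqueue context rather than from inside a configfs callback on the same subsystem (presumably to stay clear of configfs' locking restrictions on such calls). The sketch below shows the generic defer-and-wait shape of that pattern; struct defer_ctx and do_depend() are hypothetical names used only for illustration.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct defer_ctx {			/* hypothetical helper type */
	struct work_struct work;
	struct completion done;
	int result;
};

static void defer_fn(struct work_struct *work)
{
	struct defer_ctx *ctx = container_of(work, struct defer_ctx, work);

	/* do_depend() is a hypothetical stand-in for configfs_depend_item(). */
	ctx->result = do_depend();
	complete(&ctx->done);
}

static int run_deferred(struct defer_ctx *ctx)
{
	INIT_WORK(&ctx->work, defer_fn);
	init_completion(&ctx->done);
	schedule_work(&ctx->work);
	wait_for_completion(&ctx->done);	/* caller sleeps here */
	return ctx->result;
}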
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 275d8b9a7a34..33aaac8c7d59 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,8 +4,6 @@
 #define TCM_QLA2XXX_VERSION	"v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN	32
-/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */
-#define TCM_QLA2XXX_NPIV_NAMELEN 66
 
 #include "qla_target.h"
 
@@ -43,6 +41,9 @@ struct tcm_qla2xxx_tpg {
 	struct tcm_qla2xxx_tpg_attrib tpg_attrib;
 	/* Returned by tcm_qla2xxx_make_tpg() */
 	struct se_portal_group se_tpg;
+	/* Items for dealing with configfs_depend_item */
+	struct completion tpg_base_comp;
+	struct work_struct tpg_base_work;
 };
 
 struct tcm_qla2xxx_fc_loopid {
@@ -62,8 +63,6 @@ struct tcm_qla2xxx_lport {
 	char lport_name[TCM_QLA2XXX_NAMELEN];
 	/* ASCII formatted naa WWPN for VPD page 83 etc */
 	char lport_naa_name[TCM_QLA2XXX_NAMELEN];
-	/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
-	char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
 	/* map for fc_port pointers in 24-bit FC Port ID space */
 	struct btree_head32 lport_fcport_map;
 	/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d5f050..62ec84b42e31 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 
 	host_dev = scsi_get_device(shost);
 	if (host_dev && host_dev->dma_mask)
-		bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
+		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
 
 	return bounce_limit;
 }
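Note: the scsi_lib.c hunk is an integer-width fix. dma_max_pfn() returns an unsigned long, so on 32-bit configurations the shift by PAGE_SHIFT is performed in 32-bit arithmetic and can lose high bits before the result is assigned to the 64-bit bounce_limit; casting to u64 first makes the whole expression 64-bit. Below is a small stand-alone sketch of the same effect, with made-up values and uint32_t standing in for a 32-bit unsigned long.

/* User-space illustration only; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12		/* 4 KiB pages */

int main(void)
{
	uint32_t max_pfn = 0x120000;	/* hypothetical PFN above the 4 GiB mark */

	/* Shift done in 32 bits, then widened: the high bits are already gone. */
	uint32_t narrow = max_pfn << EXAMPLE_PAGE_SHIFT;
	uint64_t truncated = narrow;

	/* Widen first, then shift: the full value survives. */
	uint64_t widened = (uint64_t)max_pfn << EXAMPLE_PAGE_SHIFT;

	printf("without cast: 0x%llx\n", (unsigned long long)truncated);
	printf("with cast:    0x%llx\n", (unsigned long long)widened);
	return 0;
}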