Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                           |    2
-rw-r--r--  drivers/Makefile                          |    2
-rw-r--r--  drivers/hwspinlock/Kconfig                |   22
-rw-r--r--  drivers/hwspinlock/Makefile               |    6
-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c      |  548
-rw-r--r--  drivers/hwspinlock/hwspinlock_internal.h  |   61
-rw-r--r--  drivers/hwspinlock/omap_hwspinlock.c      |  231
-rw-r--r--  drivers/mmc/host/Kconfig                  |    2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c             |   36
-rw-r--r--  drivers/mtd/nand/Kconfig                  |   17
-rw-r--r--  drivers/mtd/nand/omap2.c                  |  367
-rw-r--r--  drivers/mtd/onenand/omap2.c               |   36
-rw-r--r--  drivers/spi/omap2_mcspi.c                 |  222
-rw-r--r--  drivers/usb/musb/musb_core.c              |    2
-rw-r--r--  drivers/usb/musb/musb_core.h              |    4
-rw-r--r--  drivers/usb/musb/musbhsdma.h              |    2
-rw-r--r--  drivers/usb/otg/isp1301_omap.c            |    2
-rw-r--r--  drivers/w1/masters/Kconfig                |    2
-rw-r--r--  drivers/watchdog/omap_wdt.c               |   25
19 files changed, 1283 insertions, 306 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9bfb71ff3a6a..177c7d156933 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -117,4 +117,6 @@ source "drivers/staging/Kconfig"
 source "drivers/platform/Kconfig"
 
 source "drivers/clk/Kconfig"
+
+source "drivers/hwspinlock/Kconfig"
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b423bb16c3a8..3f135b6fb014 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -117,3 +117,5 @@ obj-y				+= platform/
 obj-y				+= ieee802154/
 #common clk code
 obj-y				+= clk/
+
+obj-$(CONFIG_HWSPINLOCK)	+= hwspinlock/
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
new file mode 100644
index 000000000000..eb4af28f8567
--- /dev/null
+++ b/drivers/hwspinlock/Kconfig
@@ -0,0 +1,22 @@
+#
+# Generic HWSPINLOCK framework
+#
+
+config HWSPINLOCK
+	tristate "Generic Hardware Spinlock framework"
+	help
+	  Say y here to support the generic hardware spinlock framework.
+	  You only need to enable this if you have a hardware spinlock module
+	  on your system (usually only relevant if your system has remote slave
+	  coprocessors).
+
+	  If unsure, say N.
+
+config HWSPINLOCK_OMAP
+	tristate "OMAP Hardware Spinlock device"
+	depends on HWSPINLOCK && ARCH_OMAP4
+	help
+	  Say y here to support the OMAP Hardware Spinlock device (first
+	  introduced in OMAP4).
+
+	  If unsure, say N.
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
new file mode 100644
index 000000000000..5729a3f7ed3d
--- /dev/null
+++ b/drivers/hwspinlock/Makefile
@@ -0,0 +1,6 @@
+#
+# Generic Hardware Spinlock framework
+#
+
+obj-$(CONFIG_HWSPINLOCK)		+= hwspinlock_core.o
+obj-$(CONFIG_HWSPINLOCK_OMAP)		+= omap_hwspinlock.o
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
new file mode 100644
index 000000000000..43a62714b4fb
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -0,0 +1,548 @@
+/*
+ * Hardware spinlock framework
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/radix-tree.h>
+#include <linux/hwspinlock.h>
+#include <linux/pm_runtime.h>
+
+#include "hwspinlock_internal.h"
+
+/* radix tree tags */
+#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */
+
+/*
+ * A radix tree is used to maintain the available hwspinlock instances.
+ * The tree associates hwspinlock pointers with their integer key id,
+ * and provides an easy-to-use API which makes the hwspinlock core code simple
+ * and easy to read.
+ *
+ * Radix trees are quick on lookups, and reasonably efficient in terms of
+ * storage, especially with high density usages such as this framework
+ * requires (a continuous range of integer keys, beginning with zero, is
+ * used as the IDs of the hwspinlock instances).
+ *
+ * The radix tree API supports tagging items in the tree, which this
+ * framework uses to mark unused hwspinlock instances (see the
+ * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
+ * tree, looking for an unused hwspinlock instance, is now reduced to a
+ * single radix tree API call.
+ */
+static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
+
+/*
+ * Synchronization of access to the tree is achieved using this spinlock,
+ * as the radix-tree API requires that users provide all synchronization.
+ */
+static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+
+/**
+ * __hwspin_trylock() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ * @mode: controls whether local interrupts are disabled or not
+ * @flags: a pointer to where the caller's interrupt state will be saved (if
+ *         requested)
+ *
+ * This function attempts to lock an hwspinlock, and will immediately
+ * fail if the hwspinlock is already taken.
+ *
+ * Upon a successful return from this function, preemption (and possibly
+ * interrupts) is disabled, so the caller must not sleep, and is advised to
+ * release the hwspinlock as soon as possible. This is required in order to
+ * minimize remote cores polling on the hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if yes,
+ * whether he wants their previous state to be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_trylock, spin_trylock_irq and
+ * spin_trylock_irqsave.
+ *
+ * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
+ * the hwspinlock was already taken.
+ * This function will never sleep.
+ */
+int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+	int ret;
+
+	BUG_ON(!hwlock);
+	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+	/*
+	 * This spin_lock{_irq, _irqsave} serves three purposes:
+	 *
+	 * 1. Disable preemption, in order to minimize the period of time
+	 *    in which the hwspinlock is taken. This is important in order
+	 *    to minimize the possible polling on the hardware interconnect
+	 *    by a remote user of this lock.
+	 * 2. Make the hwspinlock SMP-safe (so we can take it from
+	 *    additional contexts on the local host).
+	 * 3. Ensure that in_atomic/might_sleep checks catch potential
+	 *    problems with hwspinlock usage (e.g. scheduler checks like
+	 *    'scheduling while atomic' etc.)
+	 */
+	if (mode == HWLOCK_IRQSTATE)
+		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
+	else if (mode == HWLOCK_IRQ)
+		ret = spin_trylock_irq(&hwlock->lock);
+	else
+		ret = spin_trylock(&hwlock->lock);
+
+	/* is lock already taken by another context on the local cpu ? */
+	if (!ret)
+		return -EBUSY;
+
+	/* try to take the hwspinlock device */
+	ret = hwlock->ops->trylock(hwlock);
+
+	/* if hwlock is already taken, undo spin_trylock_* and exit */
+	if (!ret) {
+		if (mode == HWLOCK_IRQSTATE)
+			spin_unlock_irqrestore(&hwlock->lock, *flags);
+		else if (mode == HWLOCK_IRQ)
+			spin_unlock_irq(&hwlock->lock);
+		else
+			spin_unlock(&hwlock->lock);
+
+		return -EBUSY;
+	}
+
+	/*
+	 * We can be sure the other core's memory operations
+	 * are observable to us only _after_ we successfully take
+	 * the hwspinlock, and we must make sure that subsequent memory
+	 * operations (both reads and writes) will not be reordered before
+	 * we actually took the hwspinlock.
+	 *
+	 * Note: the implicit memory barrier of the spinlock above is too
+	 * early, so we need this additional explicit memory barrier.
+	 */
+	mb();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__hwspin_trylock);
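
/*
 * An illustration, not part of this patch: the three @mode choices
 * mirror the spin_trylock family (0, i.e. neither HWLOCK_IRQSTATE nor
 * HWLOCK_IRQ, falls through to plain spin_trylock above).  Assumes a
 * valid hwlock, e.g. one obtained from hwspin_lock_request() below.
 */
static int hwspin_trylock_modes_sketch(struct hwspinlock *hwlock)
{
	unsigned long flags;

	if (!__hwspin_trylock(hwlock, 0, NULL))			/* spin_trylock */
		__hwspin_unlock(hwlock, 0, NULL);

	if (!__hwspin_trylock(hwlock, HWLOCK_IRQ, NULL))	/* spin_trylock_irq */
		__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);

	if (!__hwspin_trylock(hwlock, HWLOCK_IRQSTATE, &flags))	/* _irqsave */
		__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, &flags);

	return 0;
}
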
+
+/**
+ * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @timeout: timeout value in msecs
+ * @mode: mode which controls whether local interrupts are disabled or not
+ * @flags: a pointer to where the caller's interrupt state will be saved (if
+ *         requested)
+ *
+ * This function locks the given @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up after @timeout msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption is disabled
+ * (and possibly local interrupts, too), so the caller must not sleep,
+ * and is advised to release the hwspinlock as soon as possible.
+ * This is required in order to minimize remote cores polling on the
+ * hardware interconnect.
+ *
+ * The user decides whether local interrupts are disabled or not, and if yes,
+ * whether he wants their previous state to be saved. It is up to the user
+ * to choose the appropriate @mode of operation, exactly the same way users
+ * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout,
+					int mode, unsigned long *flags)
+{
+	int ret;
+	unsigned long expire;
+
+	expire = msecs_to_jiffies(timeout) + jiffies;
+
+	for (;;) {
+		/* Try to take the hwspinlock */
+		ret = __hwspin_trylock(hwlock, mode, flags);
+		if (ret != -EBUSY)
+			break;
+
+		/*
+		 * The lock is already taken, let's check if the user wants
+		 * us to try again
+		 */
+		if (time_is_before_eq_jiffies(expire))
+			return -ETIMEDOUT;
+
+		/*
+		 * Allow platform-specific relax handlers to prevent
+		 * hogging the interconnect (no sleeping, though)
+		 */
+		if (hwlock->ops->relax)
+			hwlock->ops->relax(hwlock);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
+
+/**
+ * __hwspin_unlock() - unlock a specific hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ * @mode: controls whether local interrupts need to be restored or not
+ * @flags: previous caller's interrupt state to restore (if requested)
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * (possibly) enable interrupts or restore their previous state.
+ * @hwlock must be already locked before calling this function: it is a bug
+ * to call unlock on a @hwlock that is already unlocked.
+ *
+ * The user decides whether local interrupts should be enabled or not, and
+ * if yes, whether he wants their previous state to be restored. It is up
+ * to the user to choose the appropriate @mode of operation, exactly the
+ * same way users decide between spin_unlock, spin_unlock_irq and
+ * spin_unlock_irqrestore.
+ *
+ * The function will never sleep.
+ */
+void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+	BUG_ON(!hwlock);
+	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+
+	/*
+	 * We must make sure that memory operations (both reads and writes),
+	 * done before unlocking the hwspinlock, will not be reordered
+	 * after the lock is released.
+	 *
+	 * That's the purpose of this explicit memory barrier.
+	 *
+	 * Note: the memory barrier induced by the spin_unlock below is too
+ * late; the other core is going to access memory soon after it takes
+ * the hwspinlock, and by then we want to be sure our memory
+	 * operations are already observable.
+	 */
+	mb();
+
+	hwlock->ops->unlock(hwlock);
+
+	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
+	if (mode == HWLOCK_IRQSTATE)
+		spin_unlock_irqrestore(&hwlock->lock, *flags);
+	else if (mode == HWLOCK_IRQ)
+		spin_unlock_irq(&hwlock->lock);
+	else
+		spin_unlock(&hwlock->lock);
+}
+EXPORT_SYMBOL_GPL(__hwspin_unlock);
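
/*
 * An illustration, not part of this patch: a typical lock/unlock cycle
 * around data shared with a remote coprocessor, using the timeout
 * variant so a wedged remote side cannot stall us forever.
 */
static int update_shared_data_sketch(struct hwspinlock *hwlock)
{
	unsigned long flags;
	int ret;

	/* busy-wait for up to 10 msecs, saving local interrupt state */
	ret = __hwspin_lock_timeout(hwlock, 10, HWLOCK_IRQSTATE, &flags);
	if (ret)
		return ret;	/* most likely -ETIMEDOUT */

	/* ... briefly touch the shared data; no sleeping here ... */

	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, &flags);
	return 0;
}
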
+
+/**
+ * hwspin_lock_register() - register a new hw spinlock
+ * @hwlock: hwspinlock to register.
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock instance.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock *hwlock)
+{
+	struct hwspinlock *tmp;
+	int ret;
+
+	if (!hwlock || !hwlock->ops ||
+		!hwlock->ops->trylock || !hwlock->ops->unlock) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	spin_lock_init(&hwlock->lock);
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
+	if (ret)
+		goto out;
+
+	/* mark this hwspinlock as available */
+	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+							HWSPINLOCK_UNUSED);
+
+	/* self-sanity check which should never fail */
+	WARN_ON(tmp != hwlock);
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock
+ * @id: index of the specific hwspinlock to unregister
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context.
+ *
+ * Returns the address of hwspinlock @id on success, or NULL on failure
+ */
+struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+{
+	struct hwspinlock *hwlock = NULL;
+	int ret;
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	/* make sure the hwspinlock is not in use (tag is set) */
+	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+	if (ret == 0) {
+		pr_err("hwspinlock %d still in use (or not present)\n", id);
+		goto out;
+	}
+
+	hwlock = radix_tree_delete(&hwspinlock_tree, id);
+	if (!hwlock) {
+		pr_err("failed to delete hwspinlock %d\n", id);
+		goto out;
+	}
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
+
+/**
+ * __hwspin_lock_request() - tag an hwspinlock as used and power it up
+ * @hwlock: the hwspinlock to be prepared
+ *
+ * This is an internal function that prepares an hwspinlock instance
+ * before it is given to the user. The function assumes that
+ * hwspinlock_tree_lock is taken.
+ *
+ * Returns 0 or positive to indicate success, and a negative value to
+ * indicate an error (with the appropriate error code)
+ */
+static int __hwspin_lock_request(struct hwspinlock *hwlock)
+{
+	struct hwspinlock *tmp;
+	int ret;
+
+	/* prevent underlying implementation from being removed */
+	if (!try_module_get(hwlock->owner)) {
+		dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+		return -EINVAL;
+	}
+
+	/* notify PM core that power is now needed */
+	ret = pm_runtime_get_sync(hwlock->dev);
+	if (ret < 0) {
+		dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+		return ret;
+	}
+
+	/* mark hwspinlock as used, should not fail */
+	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+							HWSPINLOCK_UNUSED);
+
+	/* self-sanity check that should never fail */
+	WARN_ON(tmp != hwlock);
+
+	return ret;
+}
+
+/**
+ * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
+ * @hwlock: a valid hwspinlock instance
+ *
+ * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
+ */
+int hwspin_lock_get_id(struct hwspinlock *hwlock)
+{
+	if (!hwlock) {
+		pr_err("invalid hwlock\n");
+		return -EINVAL;
+	}
+
+	return hwlock->id;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
+
+/**
+ * hwspin_lock_request() - request an hwspinlock
+ *
+ * This function should be called by users of the hwspinlock device,
+ * in order to dynamically assign them an unused hwspinlock.
+ * Usually the user of this lock will then have to communicate the lock's id
+ * to the remote core before it can be used for synchronization (to get the
+ * id of a given hwlock, use hwspin_lock_get_id()).
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request(void)
+{
+	struct hwspinlock *hwlock;
+	int ret;
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	/* look for an unused lock */
+	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
+						0, 1, HWSPINLOCK_UNUSED);
+	if (ret == 0) {
+		pr_warn("a free hwspinlock is not available\n");
+		hwlock = NULL;
+		goto out;
+	}
+
+	/* sanity check that should never fail */
+	WARN_ON(ret > 1);
+
+	/* mark as used and power up */
+	ret = __hwspin_lock_request(hwlock);
+	if (ret < 0)
+		hwlock = NULL;
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request);
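
/*
 * An illustration, not part of this patch: dynamic assignment as
 * described above.  The transport used to publish the lock id to the
 * remote core is platform-specific and only sketched in the comments.
 */
static int share_dynamic_lock_sketch(void)
{
	struct hwspinlock *hwlock;
	int id;

	hwlock = hwspin_lock_request();
	if (!hwlock)
		return -EBUSY;

	id = hwspin_lock_get_id(hwlock);
	/* ... send 'id' to the remote core, e.g. via a mailbox ... */
	/* ... both sides can now synchronize on this hwspinlock ... */

	return hwspin_lock_free(hwlock);	/* once no longer needed */
}
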
+
+/**
+ * hwspin_lock_request_specific() - request a specific hwspinlock
+ * @id: index of the specific hwspinlock that is requested
+ *
+ * This function should be called by users of the hwspinlock module,
+ * in order to assign them a specific hwspinlock.
+ * Usually early board code will be calling this function in order to
+ * reserve specific hwspinlock ids for predefined purposes.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns the address of the assigned hwspinlock, or NULL on error
+ */
+struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
+{
+	struct hwspinlock *hwlock;
+	int ret;
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	/* make sure this hwspinlock exists */
+	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
+	if (!hwlock) {
+		pr_warn("hwspinlock %u does not exist\n", id);
+		goto out;
+	}
+
+	/* sanity check (this shouldn't happen) */
+	WARN_ON(hwlock->id != id);
+
+	/* make sure this hwspinlock is unused */
+	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
+	if (ret == 0) {
+		pr_warn("hwspinlock %u is already in use\n", id);
+		hwlock = NULL;
+		goto out;
+	}
+
+	/* mark as used and power up */
+	ret = __hwspin_lock_request(hwlock);
+	if (ret < 0)
+		hwlock = NULL;
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return hwlock;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
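
/*
 * An illustration, not part of this patch: early board code reserving
 * hwspinlock 0 for a predefined purpose, as mentioned above.
 */
static struct hwspinlock *boot_lock;

static void __init board_reserve_hwspinlock_sketch(void)
{
	boot_lock = hwspin_lock_request_specific(0);
	if (!boot_lock)
		pr_warn("failed to reserve hwspinlock 0\n");
}
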
+
+/**
+ * hwspin_lock_free() - free a specific hwspinlock
+ * @hwlock: the specific hwspinlock to free
+ *
+ * This function marks @hwlock as free again.
+ * Should only be called with an @hwlock that was retrieved from
+ * an earlier call to hwspin_lock_request{_specific}.
+ *
+ * Can be called from an atomic context (will not sleep) but not from
+ * within interrupt context (simply because there is no use case for
+ * that yet).
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_free(struct hwspinlock *hwlock)
+{
+	struct hwspinlock *tmp;
+	int ret;
+
+	if (!hwlock) {
+		pr_err("invalid hwlock\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&hwspinlock_tree_lock);
+
+	/* make sure the hwspinlock is used */
+	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+							HWSPINLOCK_UNUSED);
+	if (ret == 1) {
+		dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+		dump_stack();
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* notify the underlying device that power is not needed */
+	ret = pm_runtime_put(hwlock->dev);
+	if (ret < 0)
+		goto out;
+
+	/* mark this hwspinlock as available */
+	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+							HWSPINLOCK_UNUSED);
+
+	/* sanity check (this shouldn't happen) */
+	WARN_ON(tmp != hwlock);
+
+	module_put(hwlock->owner);
+
+out:
+	spin_unlock(&hwspinlock_tree_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_free);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock interface");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
new file mode 100644
index 000000000000..69935e6b93e5
--- /dev/null
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -0,0 +1,61 @@
+/*
+ * Hardware spinlocks internal header
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HWSPINLOCK_HWSPINLOCK_H
+#define __HWSPINLOCK_HWSPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+/**
+ * struct hwspinlock_ops - platform-specific hwspinlock handlers
+ *
+ * @trylock: make a single attempt to take the lock. returns 0 on
+ *	     failure and true on success. may _not_ sleep.
+ * @unlock:  release the lock. always succeeds. may _not_ sleep.
+ * @relax:   optional, platform-specific relax handler, called by hwspinlock
+ *	     core while spinning on a lock, between two successive
+ *	     invocations of @trylock. may _not_ sleep.
+ */
+struct hwspinlock_ops {
+	int (*trylock)(struct hwspinlock *lock);
+	void (*unlock)(struct hwspinlock *lock);
+	void (*relax)(struct hwspinlock *lock);
+};
+
+/**
+ * struct hwspinlock - this struct represents a single hwspinlock instance
+ *
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @id: a global, unique, system-wide index of the lock.
+ * @lock: initialized and used by hwspinlock core
+ * @owner: underlying implementation module, used to maintain module ref count
+ *
+ * Note: we currently opt for simplicity, but later we can squeeze some
+ * memory bytes by grouping the dev, ops and owner members in a single
+ * per-platform struct, and have all hwspinlocks point at it.
+ */
+struct hwspinlock {
+	struct device *dev;
+	const struct hwspinlock_ops *ops;
+	int id;
+	spinlock_t lock;
+	struct module *owner;
+};
+
+#endif /* __HWSPINLOCK_HWSPINLOCK_H */
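
To make the ops contract concrete, here is a minimal sketch of what a platform implementation provides: the core only requires trylock and unlock, and relax may be left NULL. The foo_* helpers are hypothetical, invented purely for illustration; the OMAP driver below is the real in-tree example.

	static int foo_hwspinlock_trylock(struct hwspinlock *lock)
	{
		/* hypothetical helper; assume it returns true on acquisition */
		return foo_hw_test_and_set(lock->id);
	}

	static void foo_hwspinlock_unlock(struct hwspinlock *lock)
	{
		foo_hw_release(lock->id);	/* hypothetical helper */
	}

	static const struct hwspinlock_ops foo_hwspinlock_ops = {
		.trylock	= foo_hwspinlock_trylock,
		.unlock		= foo_hwspinlock_unlock,
		/* .relax is optional and may be omitted */
	};
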
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
new file mode 100644
index 000000000000..a8f02734c026
--- /dev/null
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -0,0 +1,231 @@
+/*
+ * OMAP hardware spinlock driver
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Simon Que <sque@ti.com>
+ *          Hari Kanigeri <h-kanigeri2@ti.com>
+ *          Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+
+#include "hwspinlock_internal.h"
+
+/* Spinlock register offsets */
+#define SYSSTATUS_OFFSET		0x0014
+#define LOCK_BASE_OFFSET		0x0800
+
+#define SPINLOCK_NUMLOCKS_BIT_OFFSET	(24)
+
+/* Possible values of SPINLOCK_LOCK_REG */
+#define SPINLOCK_NOTTAKEN		(0)	/* free */
+#define SPINLOCK_TAKEN			(1)	/* locked */
+
+#define to_omap_hwspinlock(lock)	\
+	container_of(lock, struct omap_hwspinlock, lock)
+
+struct omap_hwspinlock {
+	struct hwspinlock lock;
+	void __iomem *addr;
+};
+
+struct omap_hwspinlock_state {
+	int num_locks;			/* Total number of locks in system */
+	void __iomem *io_base;		/* Mapped base address */
+};
+
+static int omap_hwspinlock_trylock(struct hwspinlock *lock)
+{
+	struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+	/* attempt to acquire the lock by reading its value */
+	return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
+}
+
+static void omap_hwspinlock_unlock(struct hwspinlock *lock)
+{
+	struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+
+	/* release the lock by writing 0 to it */
+	writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
+}
+
+/*
+ * relax the OMAP interconnect while spinning on it.
+ *
+ * The specs recommend that the retry delay be just over half
+ * of the time that a requester would be expected to hold the
+ * lock.
+ *
+ * The number below is taken from a hardware specs example;
+ * obviously it is somewhat arbitrary.
+ */
+static void omap_hwspinlock_relax(struct hwspinlock *lock)
+{
+	ndelay(50);
+}
+
+static const struct hwspinlock_ops omap_hwspinlock_ops = {
+	.trylock = omap_hwspinlock_trylock,
+	.unlock = omap_hwspinlock_unlock,
+	.relax = omap_hwspinlock_relax,
+};
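
/*
 * An editor's note on the register protocol assumed by the ops above:
 * each lock register behaves as a test-and-set cell, so a read that
 * returns SPINLOCK_NOTTAKEN atomically acquires the lock, a read that
 * returns SPINLOCK_TAKEN means another requester holds it, and writing
 * SPINLOCK_NOTTAKEN releases it.
 */
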
+
+static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
+{
+	struct omap_hwspinlock *omap_lock;
+	struct omap_hwspinlock_state *state;
+	struct hwspinlock *lock;
+	struct resource *res;
+	void __iomem *io_base;
+	int i, ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	io_base = ioremap(res->start, resource_size(res));
+	if (!io_base) {
+		ret = -ENOMEM;
+		goto free_state;
+	}
+
+	/* Determine number of locks */
+	i = readl(io_base + SYSSTATUS_OFFSET);
+	i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
+
+	/* one of the four LSBs must be set, and nothing else */
+	if (hweight_long(i & 0xf) != 1 || i > 8) {
+		ret = -EINVAL;
+		goto iounmap_base;
+	}
+
+	state->num_locks = i * 32;
+	state->io_base = io_base;
+
+	platform_set_drvdata(pdev, state);
+
+	/*
+	 * runtime PM will make sure the clock of this module is
+	 * enabled iff at least one lock is requested
+	 */
+	pm_runtime_enable(&pdev->dev);
+
+	for (i = 0; i < state->num_locks; i++) {
+		omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
+		if (!omap_lock) {
+			ret = -ENOMEM;
+			goto free_locks;
+		}
+
+		omap_lock->lock.dev = &pdev->dev;
+		omap_lock->lock.owner = THIS_MODULE;
+		omap_lock->lock.id = i;
+		omap_lock->lock.ops = &omap_hwspinlock_ops;
+		omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
+
+		ret = hwspin_lock_register(&omap_lock->lock);
+		if (ret) {
+			kfree(omap_lock);
+			goto free_locks;
+		}
+	}
+
+	return 0;
+
+free_locks:
+	while (--i >= 0) {
+		lock = hwspin_lock_unregister(i);
+		/* this shouldn't happen, but let's give our best effort */
+		if (!lock) {
+			dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
+			continue;
+		}
+		omap_lock = to_omap_hwspinlock(lock);
+		kfree(omap_lock);
+	}
+	pm_runtime_disable(&pdev->dev);
+iounmap_base:
+	iounmap(io_base);
+free_state:
+	kfree(state);
+	return ret;
+}
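
/*
 * Worked example of the NUMLOCKS decoding in the probe above (an
 * illustration, not part of this patch): bits [31:24] of SYSSTATUS are
 * expected to hold a one-hot value of 1, 2, 4 or 8, each unit standing
 * for 32 locks.  On a 128-lock device:
 *
 *	i = readl(io_base + SYSSTATUS_OFFSET) >> 24  ==  0x4
 *	hweight_long(0x4 & 0xf) == 1 and 0x4 <= 8, so the value is sane
 *	num_locks = 0x4 * 32 = 128
 */
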
+
+static int omap_hwspinlock_remove(struct platform_device *pdev)
+{
+	struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
+	struct hwspinlock *lock;
+	struct omap_hwspinlock *omap_lock;
+	int i;
+
+	for (i = 0; i < state->num_locks; i++) {
+		lock = hwspin_lock_unregister(i);
+		/* this shouldn't happen at this point. if it does, at least
+		 * don't continue with the remove */
+		if (!lock) {
+			dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
+			return -EBUSY;
+		}
+
+		omap_lock = to_omap_hwspinlock(lock);
+		kfree(omap_lock);
+	}
+
+	pm_runtime_disable(&pdev->dev);
+	iounmap(state->io_base);
+	kfree(state);
+
+	return 0;
+}
+
+static struct platform_driver omap_hwspinlock_driver = {
+	.probe		= omap_hwspinlock_probe,
+	.remove		= omap_hwspinlock_remove,
+	.driver		= {
+		.name	= "omap_hwspinlock",
+	},
+};
+
+static int __init omap_hwspinlock_init(void)
+{
+	return platform_driver_register(&omap_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(omap_hwspinlock_init);
+
+static void __exit omap_hwspinlock_exit(void)
+{
+	platform_driver_unregister(&omap_hwspinlock_driver);
+}
+module_exit(omap_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
+MODULE_AUTHOR("Simon Que <sque@ti.com>");
+MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
+MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index afe8c6fa166a..54f91321749a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -225,7 +225,7 @@ config MMC_OMAP
 
 config MMC_OMAP_HS
 	tristate "TI OMAP High Speed Multimedia Card Interface support"
-	depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+	depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
 	help
 	  This selects the TI OMAP High Speed Multimedia card Interface.
 	  If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 078fdf11af03..158c0ee53b2c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -118,7 +118,7 @@
 
 #define MMC_TIMEOUT_MS		20
 #define OMAP_MMC_MASTER_CLOCK	96000000
-#define DRIVER_NAME		"mmci-omap-hs"
+#define DRIVER_NAME		"omap_hsmmc"
 
 /* Timeouts for entering power saving states on inactivity, msec */
 #define OMAP_MMC_DISABLED_TIMEOUT	100
@@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
 	return ret;
 }
 
-static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
+static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on,
 				   int vdd)
 {
 	struct omap_hsmmc_host *host =
@@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
 	return ret;
 }
 
+static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on,
+					int vdd)
+{
+	return 0;
+}
+
 static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
 				  int vdd, int cardsleep)
 {
@@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
 	return regulator_set_mode(host->vcc, mode);
 }
 
-static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
+static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep,
 				   int vdd, int cardsleep)
 {
 	struct omap_hsmmc_host *host =
@@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
 		return regulator_enable(host->vcc_aux);
 }
 
+static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep,
+					int vdd, int cardsleep)
+{
+	return 0;
+}
+
 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
 {
 	struct regulator *reg;
@@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
 		break;
 	case OMAP_MMC2_DEVID:
 	case OMAP_MMC3_DEVID:
+	case OMAP_MMC5_DEVID:
 		/* Off-chip level shifting, or none */
-		mmc_slot(host).set_power = omap_hsmmc_23_set_power;
-		mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep;
+		mmc_slot(host).set_power = omap_hsmmc_235_set_power;
+		mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep;
 		break;
+	case OMAP_MMC4_DEVID:
+		mmc_slot(host).set_power = omap_hsmmc_4_set_power;
+		mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep;
 	default:
 		pr_err("MMC%d configuration not supported!\n", host->id);
 		return -EINVAL;
@@ -1555,7 +1571,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		break;
 	}
 
-	if (host->id == OMAP_MMC1_DEVID) {
+	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
 		/* Only MMC1 can interface at 3V without some flavor
 		 * of external transceiver; but they all handle 1.8V.
 		 */
@@ -1647,7 +1663,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
 	u32 hctl, capa, value;
 
 	/* Only MMC1 supports 3.0V */
-	if (host->id == OMAP_MMC1_DEVID) {
+	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
 		hctl = SDVS30;
 		capa = VS30 | VS18;
 	} else {
@@ -2101,14 +2117,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
 	/* we start off in DISABLED state */
 	host->dpm_state = DISABLED;
 
-	if (mmc_host_enable(host->mmc) != 0) {
+	if (clk_enable(host->iclk) != 0) {
 		clk_put(host->iclk);
 		clk_put(host->fclk);
 		goto err1;
 	}
 
-	if (clk_enable(host->iclk) != 0) {
-		mmc_host_disable(host->mmc);
+	if (mmc_host_enable(host->mmc) != 0) {
+		clk_disable(host->iclk);
 		clk_put(host->iclk);
 		clk_put(host->fclk);
 		goto err1;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 450afc5df0bd..4f6c06f16328 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -106,23 +106,6 @@ config MTD_NAND_OMAP2
 	help
           Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
 
-config MTD_NAND_OMAP_PREFETCH
-	bool "GPMC prefetch support for NAND Flash device"
-	depends on MTD_NAND_OMAP2
-	default y
-	help
-	 The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
-	 to improve the performance.
-
-config MTD_NAND_OMAP_PREFETCH_DMA
-	depends on MTD_NAND_OMAP_PREFETCH
-	bool "DMA mode"
-	default n
-	help
-	 The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
-	 or in DMA interrupt mode.
-	 Say y for DMA mode or MPU mode will be used
-
 config MTD_NAND_IDS
 	tristate
 
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 28af71c61834..7b8f1fffc528 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
 #include <plat/nand.h>
 
 #define	DRIVER_NAME	"omap2-nand"
+#define	OMAP_NAND_TIMEOUT_MS	5000
 
 #define NAND_Ecc_P1e		(1 << 0)
 #define NAND_Ecc_P2e		(1 << 1)
@@ -96,26 +98,19 @@
 static const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
 
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
-static int use_prefetch = 1;
-
-/* "modprobe ... use_prefetch=0" etc */
-module_param(use_prefetch, bool, 0);
-MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
-
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
-static int use_dma = 1;
+/* oob info generated at runtime depending on ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+	.offs = 0,
+	.len = 1,
+	.pattern = scan_ff_pattern,
+};
 
-/* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-#else
-static const int use_dma;
-#endif
-#else
-const int use_prefetch;
-static const int use_dma;
-#endif
 
 struct omap_nand_info {
 	struct nand_hw_control		controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
 	unsigned long			phys_base;
 	struct completion		comp;
 	int				dma_ch;
+	int				gpmc_irq;
+	enum {
+		OMAP_NAND_IO_READ = 0,	/* read */
+		OMAP_NAND_IO_WRITE,	/* write */
+	} iomode;
+	u_char				*buf;
+	int					buf_len;
 };
 
 /**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 {
 	struct omap_nand_info *info = container_of(mtd,
 						struct omap_nand_info, mtd);
-	uint32_t pref_count = 0, w_count = 0;
+	uint32_t w_count = 0;
 	int i = 0, ret = 0;
 	u16 *p;
+	unsigned long tim, limit;
 
 	/* take care of subpage writes */
 	if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 	}
 
 	/*  configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 				iowrite16(*p++, info->nand.IO_ADDR_W);
 		}
 		/* wait for data to flushed-out before reset the prefetch */
-		do {
-			pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
-		} while (pref_count);
+		tim = 0;
+		limit = (loops_per_jiffy *
+					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+			cpu_relax();
+
 		/* disable and stop the PFPW engine */
 		gpmc_prefetch_reset(info->gpmc_cs);
 	}
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
 /*
  * omap_nand_dma_cb: callback on the completion of dma transfer
  * @lch: logical channel
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 {
 	struct omap_nand_info *info = container_of(mtd,
 					struct omap_nand_info, mtd);
-	uint32_t prefetch_status = 0;
 	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
 							DMA_FROM_DEVICE;
 	dma_addr_t dma_addr;
 	int ret;
+	unsigned long tim, limit;
 
-	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
-	 * length is 64 bytes.
+	/* The fifo depth is 64 bytes max.
+	 * But configure the FIFO-threshold to 32 to get a sync at each frame
+	 * and frame length is 32 bytes.
 	 */
 	int buf_len = len >> 6;
 
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
 	}
 	/*  configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
 	if (ret)
-		/* PFPW engine is busy, use cpu copy methode */
+		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy;
 
 	init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 
 	/* setup and start DMA using dma_addr */
 	wait_for_completion(&info->comp);
+	tim = 0;
+	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
 
-	do {
-		prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
-	} while (prefetch_status);
 	/* disable and stop the PFPW engine */
 	gpmc_prefetch_reset(info->gpmc_cs);
 
@@ -426,14 +436,6 @@ out_copy:
 			: omap_write_buf8(mtd, (u_char *) addr, len);
 	return 0;
 }
-#else
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
-					unsigned int len, int is_write)
-{
-	return 0;
-}
-#endif
 
 /**
  * omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
 		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
 }
 
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+	struct omap_nand_info *info = (struct omap_nand_info *) dev;
+	u32 bytes;
+	u32 irq_stat;
+
+	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+	bytes = bytes & 0xFFFC; /* I/O in multiples of 4 bytes */
+	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+		if (irq_stat & 0x2)
+			goto done;
+
+		if (info->buf_len && (info->buf_len < bytes))
+			bytes = info->buf_len;
+		else if (!info->buf_len)
+			bytes = 0;
+		iowrite32_rep(info->nand.IO_ADDR_W,
+						(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+		info->buf_len -= bytes;
+
+	} else {
+		ioread32_rep(info->nand.IO_ADDR_R,
+						(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+
+		if (irq_stat & 0x2)
+			goto done;
+	}
+	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+	return IRQ_HANDLED;
+
+done:
+	complete(&info->comp);
+	/* disable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+	/* clear status */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct omap_nand_info *info = container_of(mtd,
+						struct omap_nand_info, mtd);
+	int ret = 0;
+
+	if (len <= mtd->oobsize) {
+		omap_read_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_READ;
+	info->buf = buf;
+	init_completion(&info->comp);
+
+	/*  configure and start prefetch transfer */
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+	/* enable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	/* waiting for read to complete */
+	wait_for_completion(&info->comp);
+
+	/* disable and stop the PFPW engine */
+	gpmc_prefetch_reset(info->gpmc_cs);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_read_buf16(mtd, buf, len);
+	else
+		omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+					const u_char *buf, int len)
+{
+	struct omap_nand_info *info = container_of(mtd,
+						struct omap_nand_info, mtd);
+	int ret = 0;
+	unsigned long tim, limit;
+
+	if (len <= mtd->oobsize) {
+		omap_write_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_WRITE;
+	info->buf = (u_char *) buf;
+	init_completion(&info->comp);
+
+	/* configure and start prefetch transfer : size=24 */
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+	/* enable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	/* waiting for write to complete */
+	wait_for_completion(&info->comp);
+	/* wait for data to be flushed out before resetting the prefetch */
+	tim = 0;
+	limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
+
+	/* disable and stop the PFPW engine */
+	gpmc_prefetch_reset(info->gpmc_cs);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_write_buf16(mtd, buf, len);
+	else
+		omap_write_buf8(mtd, buf, len);
+}
+
 /**
  * omap_verify_buf - Verify chip data against buffer
  * @mtd: MTD device structure
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
 	return 0;
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-
 /**
  * gen_true_ecc - This function will generate true ECC value
  * @ecc_buf: buffer to store ecc code
@@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
 	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
 }
 
-#endif
-
 /**
  * omap_wait - wait until the command is done
  * @mtd: MTD device structure
@@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	struct omap_nand_info		*info;
 	struct omap_nand_platform_data	*pdata;
 	int				err;
+	int				i, offset;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	info->mtd.name		= dev_name(&pdev->dev);
 	info->mtd.owner		= THIS_MODULE;
 
-	info->nand.options	|= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+	info->nand.options	= pdata->devsize;
 	info->nand.options	|= NAND_SKIP_BBTSCAN;
 
 	/* NAND write protect off */
@@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		info->nand.chip_delay = 50;
 	}
 
-	if (use_prefetch) {
-
+	switch (pdata->xfer_type) {
+	case NAND_OMAP_PREFETCH_POLLED:
 		info->nand.read_buf   = omap_read_buf_pref;
 		info->nand.write_buf  = omap_write_buf_pref;
-		if (use_dma) {
-			err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
-				omap_nand_dma_cb, &info->comp, &info->dma_ch);
-			if (err < 0) {
-				info->dma_ch = -1;
-				printk(KERN_WARNING "DMA request failed."
-					" Non-dma data transfer mode\n");
-			} else {
-				omap_set_dma_dest_burst_mode(info->dma_ch,
-						OMAP_DMA_DATA_BURST_16);
-				omap_set_dma_src_burst_mode(info->dma_ch,
-						OMAP_DMA_DATA_BURST_16);
-
-				info->nand.read_buf   = omap_read_buf_dma_pref;
-				info->nand.write_buf  = omap_write_buf_dma_pref;
-			}
-		}
-	} else {
+		break;
+
+	case NAND_OMAP_POLLED:
 		if (info->nand.options & NAND_BUSWIDTH_16) {
 			info->nand.read_buf   = omap_read_buf16;
 			info->nand.write_buf  = omap_write_buf16;
@@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 			info->nand.read_buf   = omap_read_buf8;
 			info->nand.write_buf  = omap_write_buf8;
 		}
+		break;
+
+	case NAND_OMAP_PREFETCH_DMA:
+		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+				omap_nand_dma_cb, &info->comp, &info->dma_ch);
+		if (err < 0) {
+			info->dma_ch = -1;
+			dev_err(&pdev->dev, "DMA request failed!\n");
+			goto out_release_mem_region;
+		} else {
+			omap_set_dma_dest_burst_mode(info->dma_ch,
+					OMAP_DMA_DATA_BURST_16);
+			omap_set_dma_src_burst_mode(info->dma_ch,
+					OMAP_DMA_DATA_BURST_16);
+
+			info->nand.read_buf   = omap_read_buf_dma_pref;
+			info->nand.write_buf  = omap_write_buf_dma_pref;
+		}
+		break;
+
+	case NAND_OMAP_PREFETCH_IRQ:
+		err = request_irq(pdata->gpmc_irq,
+				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+		if (err) {
+			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+							pdata->gpmc_irq, err);
+			goto out_release_mem_region;
+		} else {
+			info->gpmc_irq	     = pdata->gpmc_irq;
+			info->nand.read_buf  = omap_read_buf_irq_pref;
+			info->nand.write_buf = omap_write_buf_irq_pref;
+		}
+		break;
+
+	default:
+		dev_err(&pdev->dev,
+			"xfer_type(%d) not supported!\n", pdata->xfer_type);
+		err = -EINVAL;
+		goto out_release_mem_region;
 	}
-	info->nand.verify_buf = omap_verify_buf;
 
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-	info->nand.ecc.bytes		= 3;
-	info->nand.ecc.size		= 512;
-	info->nand.ecc.calculate	= omap_calculate_ecc;
-	info->nand.ecc.hwctl		= omap_enable_hwecc;
-	info->nand.ecc.correct		= omap_correct_data;
-	info->nand.ecc.mode		= NAND_ECC_HW;
+	info->nand.verify_buf = omap_verify_buf;
 
-#else
-	info->nand.ecc.mode = NAND_ECC_SOFT;
-#endif
+	/* select the ecc type */
+	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+		info->nand.ecc.mode = NAND_ECC_SOFT;
+	else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+		info->nand.ecc.bytes            = 3;
+		info->nand.ecc.size             = 512;
+		info->nand.ecc.calculate        = omap_calculate_ecc;
+		info->nand.ecc.hwctl            = omap_enable_hwecc;
+		info->nand.ecc.correct          = omap_correct_data;
+		info->nand.ecc.mode             = NAND_ECC_HW;
+	}
 
 	/* DIP switches on some boards change between 8 and 16 bit
 	 * bus widths for flash.  Try the other width if the first try fails.
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		}
 	}
 
+	/* rom code layout */
+	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+		if (info->nand.options & NAND_BUSWIDTH_16)
+			offset = 2;
+		else {
+			offset = 1;
+			info->nand.badblock_pattern = &bb_descrip_flashbased;
+		}
+		omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+		for (i = 0; i < omap_oobinfo.eccbytes; i++)
+			omap_oobinfo.eccpos[i] = i+offset;
+
+		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+		omap_oobinfo.oobfree->length = info->mtd.oobsize -
+					(offset + omap_oobinfo.eccbytes);
+
+		info->nand.ecc.layout = &omap_oobinfo;
+	}
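
	/*
	 * Worked example of the layout computed above (an illustration,
	 * not part of this patch): a device with a 64-byte OOB area on a
	 * 16-bit bus gives offset = 2, eccbytes = 3 * (64 / 16) = 12,
	 * eccpos = {2, 3, ..., 13}, and oobfree = {.offset = 14,
	 * .length = 64 - 14 = 50}.
	 */
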
+
 #ifdef CONFIG_MTD_PARTITIONS
 	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
 	if (err > 0)
@@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev)
 							mtd);
 
 	platform_set_drvdata(pdev, NULL);
-	if (use_dma)
+	if (info->dma_ch != -1)
 		omap_free_dma(info->dma_ch);
 
+	if (info->gpmc_irq)
+		free_irq(info->gpmc_irq, info);
+
 	/* Release NAND device, its internal structures and partitions */
 	nand_release(&info->mtd);
 	iounmap(info->nand.IO_ADDR_R);
@@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = {
 
 static int __init omap_nand_init(void)
 {
-	printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+	pr_info("%s driver initializing\n", DRIVER_NAME);
 
-	/* This check is required if driver is being
-	 * loaded run time as a module
-	 */
-	if ((1 == use_dma) && (0 == use_prefetch)) {
-		printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
-				"without use_prefetch'. Prefetch will not be"
-				" used in either mode (mpu or dma)\n");
-	}
 	return platform_driver_register(&omap_nand_driver);
 }
 
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index c849cacf4b2f..14a49abe057e 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -63,7 +63,7 @@ struct omap2_onenand {
 	struct completion dma_done;
 	int dma_channel;
 	int freq;
-	int (*setup)(void __iomem *base, int freq);
+	int (*setup)(void __iomem *base, int *freq_ptr);
 	struct regulator *regulator;
 };
 
@@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
 			wait_err("controller error", state, ctrl, intr);
 			return -EIO;
 		}
-		if ((intr & intr_flags) != intr_flags) {
-			wait_err("timeout", state, ctrl, intr);
-			return -EIO;
-		}
-		return 0;
+		if ((intr & intr_flags) == intr_flags)
+			return 0;
+		/* continue in the wait-for-interrupt branch below */
 	}
 
 	if (state != FL_READING) {
@@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data)
 
 	/* DMA is not in use so this is all that is needed */
 	/* Revisit for OMAP3! */
-	ret = c->setup(c->onenand.base, c->freq);
+	ret = c->setup(c->onenand.base, &c->freq);
 
 	return ret;
 }
@@ -673,7 +671,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 	}
 
 	if (pdata->onenand_setup != NULL) {
-		r = pdata->onenand_setup(c->onenand.base, c->freq);
+		r = pdata->onenand_setup(c->onenand.base, &c->freq);
 		if (r < 0) {
 			dev_err(&pdev->dev, "Onenand platform setup failed: "
 				"%d\n", r);
@@ -718,8 +716,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 	}
 
 	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
-		 "base %p\n", c->gpmc_cs, c->phys_base,
-		 c->onenand.base);
+		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
+		 c->onenand.base, c->freq);
 
 	c->pdev = pdev;
 	c->mtd.name = dev_name(&pdev->dev);
@@ -754,24 +752,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 	if ((r = onenand_scan(&c->mtd, 1)) < 0)
 		goto err_release_regulator;
 
-	switch ((c->onenand.version_id >> 4) & 0xf) {
-	case 0:
-		c->freq = 40;
-		break;
-	case 1:
-		c->freq = 54;
-		break;
-	case 2:
-		c->freq = 66;
-		break;
-	case 3:
-		c->freq = 83;
-		break;
-	case 4:
-		c->freq = 104;
-		break;
-	}
-
 #ifdef CONFIG_MTD_PARTITIONS
 	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
 	if (r > 0)
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index abb1ffbf3d20..36501adc125d 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2005, 2006 Nokia Corporation
  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
- *		Juha Yrjölä <juha.yrjola@nokia.com>
+ *		Juha Yrjölä <juha.yrjola@nokia.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -33,6 +33,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/spi/spi.h>
 
@@ -46,7 +47,6 @@
 #define OMAP2_MCSPI_MAX_CTRL 		4
 
 #define OMAP2_MCSPI_REVISION		0x00
-#define OMAP2_MCSPI_SYSCONFIG		0x10
 #define OMAP2_MCSPI_SYSSTATUS		0x14
 #define OMAP2_MCSPI_IRQSTATUS		0x18
 #define OMAP2_MCSPI_IRQENABLE		0x1c
@@ -63,13 +63,6 @@
 
 /* per-register bitmasks: */
 
-#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE	BIT(4)
-#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP	BIT(2)
-#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE	BIT(0)
-#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET	BIT(1)
-
-#define OMAP2_MCSPI_SYSSTATUS_RESETDONE	BIT(0)
-
 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
@@ -122,13 +115,12 @@ struct omap2_mcspi {
 	spinlock_t		lock;
 	struct list_head	msg_queue;
 	struct spi_master	*master;
-	struct clk		*ick;
-	struct clk		*fck;
 	/* Virtual base address of the controller */
 	void __iomem		*base;
 	unsigned long		phys;
 	/* SPI1 has 4 channels, while SPI2 has 2 */
 	struct omap2_mcspi_dma	*dma_channels;
+	struct  device		*dev;
 };
 
 struct omap2_mcspi_cs {
@@ -144,7 +136,6 @@ struct omap2_mcspi_cs {
  * corresponding registers are modified.
  */
 struct omap2_mcspi_regs {
-	u32 sysconfig;
 	u32 modulctrl;
 	u32 wakeupenable;
 	struct list_head cs;
@@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
 			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
 
-	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG,
-			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig);
-
 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
 			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
 
@@ -280,20 +268,12 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
 }
 static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
 {
-	clk_disable(mcspi->ick);
-	clk_disable(mcspi->fck);
+	pm_runtime_put_sync(mcspi->dev);
 }
 
 static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
 {
-	if (clk_enable(mcspi->ick))
-		return -ENODEV;
-	if (clk_enable(mcspi->fck))
-		return -ENODEV;
-
-	omap2_mcspi_restore_ctx(mcspi);
-
-	return 0;
+	return pm_runtime_get_sync(mcspi->dev);
 }
 
 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
@@ -819,8 +799,9 @@ static int omap2_mcspi_setup(struct spi_device *spi)
 			return ret;
 	}
 
-	if (omap2_mcspi_enable_clocks(mcspi))
-		return -ENODEV;
+	ret = omap2_mcspi_enable_clocks(mcspi);
+	if (ret < 0)
+		return ret;
 
 	ret = omap2_mcspi_setup_transfer(spi, NULL);
 	omap2_mcspi_disable_clocks(mcspi);
@@ -863,10 +844,11 @@ static void omap2_mcspi_work(struct work_struct *work)
 	struct omap2_mcspi	*mcspi;
 
 	mcspi = container_of(work, struct omap2_mcspi, work);
-	spin_lock_irq(&mcspi->lock);
 
-	if (omap2_mcspi_enable_clocks(mcspi))
-		goto out;
+	if (omap2_mcspi_enable_clocks(mcspi) < 0)
+		return;
+
+	spin_lock_irq(&mcspi->lock);
 
 	/* We only enable one channel at a time -- the one whose message is
 	 * at the head of the queue -- although this controller would gladly
@@ -979,10 +961,9 @@ static void omap2_mcspi_work(struct work_struct *work)
 		spin_lock_irq(&mcspi->lock);
 	}
 
-	omap2_mcspi_disable_clocks(mcspi);
-
-out:
 	spin_unlock_irq(&mcspi->lock);
+
+	omap2_mcspi_disable_clocks(mcspi);
 }
 
 static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
@@ -1058,25 +1039,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
 	return 0;
 }
 
-static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
+static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
 {
 	struct spi_master	*master = mcspi->master;
 	u32			tmp;
+	int			ret = 0;
 
-	if (omap2_mcspi_enable_clocks(mcspi))
-		return -1;
-
-	mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
-			OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
-	do {
-		tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
-	} while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
-
-	tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
-		OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
-		OMAP2_MCSPI_SYSCONFIG_SMARTIDLE;
-	mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp);
-	omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp;
+	ret = omap2_mcspi_enable_clocks(mcspi);
+	if (ret < 0)
+		return ret;
 
 	tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
@@ -1087,91 +1058,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
 	return 0;
 }
 
-static u8 __initdata spi1_rxdma_id [] = {
-	OMAP24XX_DMA_SPI1_RX0,
-	OMAP24XX_DMA_SPI1_RX1,
-	OMAP24XX_DMA_SPI1_RX2,
-	OMAP24XX_DMA_SPI1_RX3,
-};
-
-static u8 __initdata spi1_txdma_id [] = {
-	OMAP24XX_DMA_SPI1_TX0,
-	OMAP24XX_DMA_SPI1_TX1,
-	OMAP24XX_DMA_SPI1_TX2,
-	OMAP24XX_DMA_SPI1_TX3,
-};
-
-static u8 __initdata spi2_rxdma_id[] = {
-	OMAP24XX_DMA_SPI2_RX0,
-	OMAP24XX_DMA_SPI2_RX1,
-};
-
-static u8 __initdata spi2_txdma_id[] = {
-	OMAP24XX_DMA_SPI2_TX0,
-	OMAP24XX_DMA_SPI2_TX1,
-};
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
-	|| defined(CONFIG_ARCH_OMAP4)
-static u8 __initdata spi3_rxdma_id[] = {
-	OMAP24XX_DMA_SPI3_RX0,
-	OMAP24XX_DMA_SPI3_RX1,
-};
+static int omap_mcspi_runtime_resume(struct device *dev)
+{
+	struct omap2_mcspi	*mcspi;
+	struct spi_master	*master;
 
-static u8 __initdata spi3_txdma_id[] = {
-	OMAP24XX_DMA_SPI3_TX0,
-	OMAP24XX_DMA_SPI3_TX1,
-};
-#endif
+	master = dev_get_drvdata(dev);
+	mcspi = spi_master_get_devdata(master);
+	omap2_mcspi_restore_ctx(mcspi);
 
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-static u8 __initdata spi4_rxdma_id[] = {
-	OMAP34XX_DMA_SPI4_RX0,
-};
+	return 0;
+}
 
-static u8 __initdata spi4_txdma_id[] = {
-	OMAP34XX_DMA_SPI4_TX0,
-};
-#endif
 
 static int __init omap2_mcspi_probe(struct platform_device *pdev)
 {
 	struct spi_master	*master;
+	struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
 	struct omap2_mcspi	*mcspi;
 	struct resource		*r;
 	int			status = 0, i;
-	const u8		*rxdma_id, *txdma_id;
-	unsigned		num_chipselect;
-
-	switch (pdev->id) {
-	case 1:
-		rxdma_id = spi1_rxdma_id;
-		txdma_id = spi1_txdma_id;
-		num_chipselect = 4;
-		break;
-	case 2:
-		rxdma_id = spi2_rxdma_id;
-		txdma_id = spi2_txdma_id;
-		num_chipselect = 2;
-		break;
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
-	|| defined(CONFIG_ARCH_OMAP4)
-	case 3:
-		rxdma_id = spi3_rxdma_id;
-		txdma_id = spi3_txdma_id;
-		num_chipselect = 2;
-		break;
-#endif
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-	case 4:
-		rxdma_id = spi4_rxdma_id;
-		txdma_id = spi4_txdma_id;
-		num_chipselect = 1;
-		break;
-#endif
-	default:
-		return -EINVAL;
-	}
 
 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
 	if (master == NULL) {
@@ -1188,7 +1094,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
 	master->setup = omap2_mcspi_setup;
 	master->transfer = omap2_mcspi_transfer;
 	master->cleanup = omap2_mcspi_cleanup;
-	master->num_chipselect = num_chipselect;
+	master->num_chipselect = pdata->num_cs;
 
 	dev_set_drvdata(&pdev->dev, master);
 
@@ -1206,49 +1112,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
 		goto err1;
 	}
 
+	r->start += pdata->regs_offset;
+	r->end += pdata->regs_offset;
 	mcspi->phys = r->start;
 	mcspi->base = ioremap(r->start, r->end - r->start + 1);
 	if (!mcspi->base) {
 		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
 		status = -ENOMEM;
-		goto err1aa;
+		goto err2;
 	}
 
+	mcspi->dev = &pdev->dev;
 	INIT_WORK(&mcspi->work, omap2_mcspi_work);
 
 	spin_lock_init(&mcspi->lock);
 	INIT_LIST_HEAD(&mcspi->msg_queue);
 	INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
 
-	mcspi->ick = clk_get(&pdev->dev, "ick");
-	if (IS_ERR(mcspi->ick)) {
-		dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
-		status = PTR_ERR(mcspi->ick);
-		goto err1a;
-	}
-	mcspi->fck = clk_get(&pdev->dev, "fck");
-	if (IS_ERR(mcspi->fck)) {
-		dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
-		status = PTR_ERR(mcspi->fck);
-		goto err2;
-	}
-
 	mcspi->dma_channels = kcalloc(master->num_chipselect,
 			sizeof(struct omap2_mcspi_dma),
 			GFP_KERNEL);
 
 	if (mcspi->dma_channels == NULL)
-		goto err3;
+		goto err2;
+
+	for (i = 0; i < master->num_chipselect; i++) {
+		char dma_ch_name[14];
+		struct resource *dma_res;
+
+		sprintf(dma_ch_name, "rx%d", i);
+		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+							dma_ch_name);
+		if (!dma_res) {
+			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
+			status = -ENODEV;
+			break;
+		}
 
-	for (i = 0; i < num_chipselect; i++) {
 		mcspi->dma_channels[i].dma_rx_channel = -1;
-		mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
+		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
+		sprintf(dma_ch_name, "tx%d", i);
+		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+							dma_ch_name);
+		if (!dma_res) {
+			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
+			status = -ENODEV;
+			break;
+		}
+
 		mcspi->dma_channels[i].dma_tx_channel = -1;
-		mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
+		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
 	}
 
-	if (omap2_mcspi_reset(mcspi) < 0)
-		goto err4;
+	pm_runtime_enable(&pdev->dev);
+
+	if (status || omap2_mcspi_master_setup(mcspi) < 0)
+		goto err3;
 
 	status = spi_register_master(master);
 	if (status < 0)
@@ -1257,17 +1176,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
 	return status;
 
 err4:
-	kfree(mcspi->dma_channels);
+	spi_master_put(master);
 err3:
-	clk_put(mcspi->fck);
+	kfree(mcspi->dma_channels);
 err2:
-	clk_put(mcspi->ick);
-err1a:
-	iounmap(mcspi->base);
-err1aa:
 	release_mem_region(r->start, (r->end - r->start) + 1);
+	iounmap(mcspi->base);
 err1:
-	spi_master_put(master);
 	return status;
 }
 
@@ -1283,9 +1198,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
 	mcspi = spi_master_get_devdata(master);
 	dma_channels = mcspi->dma_channels;
 
-	clk_put(mcspi->fck);
-	clk_put(mcspi->ick);
-
+	omap2_mcspi_disable_clocks(mcspi);
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(r->start, (r->end - r->start) + 1);
 
@@ -1336,6 +1249,7 @@ static int omap2_mcspi_resume(struct device *dev)
 
 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
 	.resume = omap2_mcspi_resume,
+	.runtime_resume	= omap_mcspi_runtime_resume,
 };
 
 static struct platform_driver omap2_mcspi_driver = {
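
Taken together, the omap2_mcspi hunks above swap direct ick/fck clock management
for runtime PM: probe() calls pm_runtime_enable(), every path that touches
registers brackets the access with pm_runtime_get_sync()/pm_runtime_put_sync(),
and the context restore that omap2_mcspi_enable_clocks() used to perform moves
into a runtime_resume callback. A minimal sketch of that shape, using
hypothetical foo_* names rather than the driver's own:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>
	#include <linux/types.h>

	/* hypothetical software copy of context lost while the module idles */
	struct foo_ctx { u32 modulctrl; };

	static void foo_restore_ctx(struct foo_ctx *ctx)
	{
		/* re-program the registers from ctx here */
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* runs whenever a get_sync() powers the module back up */
		foo_restore_ctx(dev_get_drvdata(dev));
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.runtime_resume	= foo_runtime_resume,
	};

	static int foo_enable(struct device *dev)
	{
		return pm_runtime_get_sync(dev);	/* negative errno on failure */
	}

	static void foo_disable(struct device *dev)
	{
		pm_runtime_put_sync(dev);	/* module may idle immediately */
	}

Because pm_runtime_get_sync() reports failure as a negative errno rather than
the old boolean-style clk_enable() result, omap2_mcspi_setup() and
omap2_mcspi_work() above now check for ret < 0.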
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index a914010d9d12..630ae7f3cd4c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1530,7 +1530,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
 
 /*-------------------------------------------------------------------------*/
 
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
 	defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
 	defined(CONFIG_ARCH_U5500)
 
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 4f0dd2ed3964..4bd9e2145ee4 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -212,8 +212,8 @@ enum musb_g_ep0_state {
  * directly with the "flat" model, or after setting up an index register.
  */
 
-#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
-		|| defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \
+		|| defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \
 		|| defined(CONFIG_ARCH_OMAP4)
 /* REVISIT indexed access seemed to
  * misbehave (on DaVinci) for at least peripheral IN ...
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 21056c924c74..320fd4afb93f 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -31,7 +31,7 @@
  *
  */
 
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430)
 #include "omap2430.h"
 #endif
 
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index e00fa1b22ecd..8c6fdef61d1c 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1510,7 +1510,7 @@ isp1301_start_hnp(struct otg_transceiver *dev)
 
 /*-------------------------------------------------------------------------*/
 
-static int __init
+static int __devinit
 isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 {
 	int			status;
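
The isp1301 change is about section placement: an __init function is discarded
once booting finishes, but an I2C probe routine can still be invoked later,
whenever a matching client is bound, so it belongs in __devinit (kept while
CONFIG_HOTPLUG is set) instead. A sketch of the annotation on a hypothetical
bar_probe:

	#include <linux/i2c.h>
	#include <linux/init.h>

	/* kept after boot, so late binding cannot jump into freed init memory */
	static int __devinit bar_probe(struct i2c_client *client,
				       const struct i2c_device_id *id)
	{
		return 0;
	}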
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 80b3b123dd7f..7c608c5ccf84 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO
 
 config HDQ_MASTER_OMAP
 	tristate "OMAP HDQ driver"
-	depends on ARCH_OMAP2430 || ARCH_OMAP3
+	depends on SOC_OMAP2430 || ARCH_OMAP3
 	help
 	  Say Y here if you want support for the 1-wire or HDQ Interface
 	  on an OMAP processor.
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 3dd4971160ef..2b4acb86c191 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -124,6 +124,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
 	u32 pre_margin = GET_WLDR_VAL(timer_margin);
 	void __iomem *base = wdev->base;
 
+	pm_runtime_get_sync(wdev->dev);
+
 	/* just count up at 32 KHz */
 	while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
 		cpu_relax();
@@ -131,6 +133,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
 	__raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR);
 	while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
 		cpu_relax();
+
+	pm_runtime_put_sync(wdev->dev);
 }
 
 /*
@@ -160,6 +164,8 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
 	omap_wdt_ping(wdev); /* trigger loading of new timeout value */
 	omap_wdt_enable(wdev);
 
+	pm_runtime_put_sync(wdev->dev);
+
 	return nonseekable_open(inode, file);
 }
 
@@ -171,6 +177,7 @@ static int omap_wdt_release(struct inode *inode, struct file *file)
 	 *      Shut off the timer unless NOWAYOUT is defined.
 	 */
 #ifndef CONFIG_WATCHDOG_NOWAYOUT
+	pm_runtime_get_sync(wdev->dev);
 
 	omap_wdt_disable(wdev);
 
@@ -190,9 +197,11 @@ static ssize_t omap_wdt_write(struct file *file, const char __user *data,
 
 	/* Refresh LOAD_TIME. */
 	if (len) {
+		pm_runtime_get_sync(wdev->dev);
 		spin_lock(&wdt_lock);
 		omap_wdt_ping(wdev);
 		spin_unlock(&wdt_lock);
+		pm_runtime_put_sync(wdev->dev);
 	}
 	return len;
 }
@@ -224,15 +233,18 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
 			return put_user(omap_prcm_get_reset_sources(),
 					(int __user *)arg);
 	case WDIOC_KEEPALIVE:
+		pm_runtime_get_sync(wdev->dev);
 		spin_lock(&wdt_lock);
 		omap_wdt_ping(wdev);
 		spin_unlock(&wdt_lock);
+		pm_runtime_put_sync(wdev->dev);
 		return 0;
 	case WDIOC_SETTIMEOUT:
 		if (get_user(new_margin, (int __user *)arg))
 			return -EFAULT;
 		omap_wdt_adjust_timeout(new_margin);
 
+		pm_runtime_get_sync(wdev->dev);
 		spin_lock(&wdt_lock);
 		omap_wdt_disable(wdev);
 		omap_wdt_set_timeout(wdev);
@@ -240,6 +252,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
 
 		omap_wdt_ping(wdev);
 		spin_unlock(&wdt_lock);
+		pm_runtime_put_sync(wdev->dev);
 		/* Fall */
 	case WDIOC_GETTIMEOUT:
 		return put_user(timer_margin, (int __user *)arg);
@@ -345,8 +358,11 @@ static void omap_wdt_shutdown(struct platform_device *pdev)
 {
 	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
 
-	if (wdev->omap_wdt_users)
+	if (wdev->omap_wdt_users) {
+		pm_runtime_get_sync(wdev->dev);
 		omap_wdt_disable(wdev);
+		pm_runtime_put_sync(wdev->dev);
+	}
 }
 
 static int __devexit omap_wdt_remove(struct platform_device *pdev)
@@ -381,8 +397,11 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
 
-	if (wdev->omap_wdt_users)
+	if (wdev->omap_wdt_users) {
+		pm_runtime_get_sync(wdev->dev);
 		omap_wdt_disable(wdev);
+		pm_runtime_put_sync(wdev->dev);
+	}
 
 	return 0;
 }
@@ -392,8 +411,10 @@ static int omap_wdt_resume(struct platform_device *pdev)
 	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
 
 	if (wdev->omap_wdt_users) {
+		pm_runtime_get_sync(wdev->dev);
 		omap_wdt_enable(wdev);
 		omap_wdt_ping(wdev);
+		pm_runtime_put_sync(wdev->dev);
 	}
 
 	return 0;
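
The omap_wdt hunks all enforce one rule: the watchdog's registers may only be
touched while a runtime PM reference is held, so each path shown above
(set_timeout, open, release, write, ioctl, shutdown, suspend, resume) takes a
reference around its hardware accesses. pm_runtime_get_sync() may sleep, which
is why it sits outside wdt_lock in every hunk. A minimal sketch of the
bracketing, on a hypothetical baz_ping:

	#include <linux/device.h>
	#include <linux/pm_runtime.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(baz_lock);

	static void baz_ping(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* may sleep: take it before the lock */
		spin_lock(&baz_lock);
		/* ... write the refresh registers ... */
		spin_unlock(&baz_lock);
		pm_runtime_put_sync(dev);	/* no register access past this point */
	}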