author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-07 10:20:31 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-07 10:20:31 -0800
commit     a8e98d6d51a3eb7bb061b1625193a129c8bd094f (patch)
tree       0fa58b6e11e37023b024e55b8f0e7e01438706d4 /drivers/mtd
parent     f0f1b3364ae7f48084bdf2837fb979ff59622523 (diff)
parent     f9f7dd222364a6428d2ad99a515935dd1dd89d18 (diff)
download   linux-a8e98d6d51a3eb7bb061b1625193a129c8bd094f.tar.gz
Merge git://git.infradead.org/mtd-2.6
* git://git.infradead.org/mtd-2.6: (120 commits)
  [MTD] Fix mtdoops.c compilation
  [MTD] [NOR] fix startup lock when using multiple nor flash chips
  [MTD] [DOC200x] eccbuf is statically defined and always evaluate to true
  [MTD] Fix maps/physmap.c compilation with CONFIG_PM
  [MTD] onenand: Add panic_write function to the onenand driver
  [MTD] mtdoops: Use the panic_write function when present
  [MTD] Add mtd panic_write function pointer
  [MTD] [NAND] Freescale enhanced Local Bus Controller FCM NAND support.
  [MTD] physmap.c: Add support for multiple resources
  [MTD] [NAND] Fix misparenthesization introduced by commit 78b65179...
  [MTD] [NAND] Fix Blackfin NFC ECC calculating bug with page size 512 bytes
  [MTD] [NAND] Remove wrong operation in PM function of the BF54x NFC driver
  [MTD] [NAND] Remove unused variable in plat_nand_remove
  [MTD] Unlocking all Intel flash that is locked on power up.
  [MTD] [NAND] at91_nand: Make mtdparts option can override board info
  [MTD] mtdoops: Various minor cleanups
  [MTD] mtdoops: Ensure sequential write to the buffer
  [MTD] mtdoops: Perform write operations in a workqueue
  [MTD] mtdoops: Add further error return code checking
  [MTD] [NOR] Test devtype, not definition in flash_probe(), drivers/mtd/devices/lart.c
  ...
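
Several of the commits listed above ("Add mtd panic_write function pointer", "mtdoops: Use the panic_write function when present", and the onenand panic_write addition) introduce an optional panic_write hook on struct mtd_info for writing the oops buffer after a crash. Below is a minimal sketch of the resulting dispatch, assuming the conventional MTD write signature of this era; the helper name, the in_panic flag, and the error policy are invented for illustration and this is not the actual mtdoops code.

/*
 * Illustrative only: prefer mtd->panic_write (callable in panic context)
 * when the driver provides it, otherwise fall back to the normal write.
 */
#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int example_oops_write(struct mtd_info *mtd, loff_t to,
                              size_t len, const u_char *buf, int in_panic)
{
        size_t retlen = 0;
        int ret;

        if (in_panic && mtd->panic_write)
                ret = mtd->panic_write(mtd, to, len, &retlen, buf);
        else
                ret = mtd->write(mtd, to, len, &retlen, buf);

        if (ret || retlen != len)
                return ret ? ret : -EIO;
        return 0;
}
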
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Kconfig | 11
-rw-r--r--  drivers/mtd/Makefile | 1
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 78
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 14
-rw-r--r--  drivers/mtd/chips/cfi_probe.c | 12
-rw-r--r--  drivers/mtd/chips/gen_probe.c | 2
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 1376
-rw-r--r--  drivers/mtd/cmdlinepart.c | 9
-rw-r--r--  drivers/mtd/devices/doc2000.c | 4
-rw-r--r--  drivers/mtd/devices/doc2001plus.c | 2
-rw-r--r--  drivers/mtd/devices/lart.c | 2
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c | 2
-rw-r--r--  drivers/mtd/maps/Kconfig | 9
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/physmap.c | 168
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 88
-rw-r--r--  drivers/mtd/maps/pnc2000.c | 93
-rw-r--r--  drivers/mtd/maps/scb2_flash.c | 2
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 2
-rw-r--r--  drivers/mtd/mtdchar.c | 8
-rw-r--r--  drivers/mtd/mtdcore.c | 2
-rw-r--r--  drivers/mtd/mtdoops.c | 185
-rw-r--r--  drivers/mtd/mtdpart.c | 17
-rw-r--r--  drivers/mtd/nand/Kconfig | 26
-rw-r--r--  drivers/mtd/nand/Makefile | 3
-rw-r--r--  drivers/mtd/nand/at91_nand.c | 12
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c | 39
-rw-r--r--  drivers/mtd/nand/cafe_nand.c | 19
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 1244
-rw-r--r--  drivers/mtd/nand/nand_base.c | 8
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 171
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 243
-rw-r--r--  drivers/mtd/nand/plat_nand.c | 2
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 48
-rw-r--r--  drivers/mtd/ofpart.c | 74
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 199
-rw-r--r--  drivers/mtd/redboot.c | 25
-rw-r--r--  drivers/mtd/ubi/build.c | 674
-rw-r--r--  drivers/mtd/ubi/cdev.c | 244
-rw-r--r--  drivers/mtd/ubi/debug.h | 21
-rw-r--r--  drivers/mtd/ubi/eba.c | 321
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 9
-rw-r--r--  drivers/mtd/ubi/io.c | 10
-rw-r--r--  drivers/mtd/ubi/kapi.c | 177
-rw-r--r--  drivers/mtd/ubi/misc.c | 2
-rw-r--r--  drivers/mtd/ubi/scan.c | 12
-rw-r--r--  drivers/mtd/ubi/ubi.h | 171
-rw-r--r--  drivers/mtd/ubi/upd.c | 185
-rw-r--r--  drivers/mtd/ubi/vmt.c | 208
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 45
-rw-r--r--  drivers/mtd/ubi/wl.c | 338
51 files changed, 4605 insertions, 2013 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 8848e8ac705d..e8503341e3b1 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -150,6 +150,14 @@ config MTD_AFS_PARTS
 	  for your particular device. It won't happen automatically. The
 	  'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
 
+config MTD_OF_PARTS
+	tristate "Flash partition map based on OF description"
+	depends on PPC_OF && MTD_PARTITIONS
+	help
+	  This provides a partition parsing function which derives
+	  the partition map from the children of the flash node,
+	  as described in Documentation/powerpc/booting-without-of.txt.
+
 comment "User Modules And Translation Layers"
 
 config MTD_CHAR
@@ -286,6 +294,9 @@ config MTD_OOPS
 	  buffer in a flash partition where it can be read back at some
 	  later point.
 
+	  To use, add console=ttyMTDx to the kernel command line,
+	  where x is the MTD device number to use.
+
 source "drivers/mtd/chips/Kconfig"
 
 source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 7f0b04b4caa7..538e33d11d46 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT)	+= mtdconcat.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS)	+= afs.o
+obj-$(CONFIG_MTD_OF_PARTS)      += ofpart.o
 
 # 'Users' - code which presents functionality to userspace.
 obj-$(CONFIG_MTD_CHAR)		+= mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 1707f98c322c..47794d23a42e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -50,6 +50,7 @@
 #define I82802AC	0x00ac
 #define MANUFACTURER_ST         0x0020
 #define M50LPW080       0x002F
+#define AT49BV640D	0x02de
 
 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -157,6 +158,47 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
 }
 #endif
 
+/* Atmel chips don't use the same PRI format as Intel chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+	struct cfi_pri_atmel atmel_pri;
+	uint32_t features = 0;
+
+	/* Reverse byteswapping */
+	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
+	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
+	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
+
+	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
+
+	if (atmel_pri.Features & 0x01) /* chip erase supported */
+		features |= (1<<0);
+	if (atmel_pri.Features & 0x02) /* erase suspend supported */
+		features |= (1<<1);
+	if (atmel_pri.Features & 0x04) /* program suspend supported */
+		features |= (1<<2);
+	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
+		features |= (1<<9);
+	if (atmel_pri.Features & 0x20) /* page mode read supported */
+		features |= (1<<7);
+	if (atmel_pri.Features & 0x40) /* queued erase supported */
+		features |= (1<<4);
+	if (atmel_pri.Features & 0x80) /* Protection bits supported */
+		features |= (1<<6);
+
+	extp->FeatureSupport = features;
+
+	/* burst write mode not supported */
+	cfi->cfiq->BufWriteTimeoutTyp = 0;
+	cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -227,13 +269,20 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
 /*
  * Some chips power-up with all sectors locked by default.
  */
-static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
 {
-	printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
-	mtd->flags |= MTD_STUPID_LOCK;
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+	if (cfip->FeatureSupport&32) {
+		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+		mtd->flags |= MTD_POWERUP_LOCK;
+	}
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
 #endif
@@ -245,7 +294,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
 #endif
 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
-	{ MANUFACTURER_INTEL, 0x891c,	      fixup_use_powerup_lock, NULL, },
+	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
 	{ 0, 0, NULL, NULL }
 };
 
@@ -277,7 +326,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
 		return NULL;
 
 	if (extp->MajorVersion != '1' ||
-	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
 		       "version %c.%c.\n",  extp->MajorVersion,
 		       extp->MinorVersion);
@@ -752,6 +801,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
 {
 	int ret;
+	DECLARE_WAITQUEUE(wait, current);
 
  retry:
 	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
@@ -808,6 +858,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			spin_unlock(contender->mutex);
 		}
 
+		/* Check if we already have suspended erase
+		 * on this chip. Sleep. */
+		if (mode == FL_ERASING && shared->erasing
+		    && shared->erasing->oldstate == FL_ERASING) {
+			spin_unlock(&shared->lock);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			add_wait_queue(&chip->wq, &wait);
+			spin_unlock(chip->mutex);
+			schedule();
+			remove_wait_queue(&chip->wq, &wait);
+			spin_lock(chip->mutex);
+			goto retry;
+		}
+
 		/* We now own it */
 		shared->writing = chip;
 		if (mode == FL_ERASING)
@@ -2294,7 +2358,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
 	struct flchip *chip;
 	int ret = 0;
 
-	if ((mtd->flags & MTD_STUPID_LOCK)
+	if ((mtd->flags & MTD_POWERUP_LOCK)
 	    && extp && (extp->FeatureSupport & (1 << 5)))
 		cfi_intelext_save_locks(mtd);
 
@@ -2405,7 +2469,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
 		spin_unlock(chip->mutex);
 	}
 
-	if ((mtd->flags & MTD_STUPID_LOCK)
+	if ((mtd->flags & MTD_POWERUP_LOCK)
 	    && extp && (extp->FeatureSupport & (1 << 5)))
 		cfi_intelext_restore_locks(mtd);
 }
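
The get_chip() hunk above adds a classic wait-queue sleep: if an erase is already suspended on the chip, the caller queues itself on chip->wq, drops the locks, schedules, and retries once woken. A simplified stand-in is sketched below with a single spinlock and a caller-supplied busy predicate; the real code also juggles shared->lock and chip->mutex, so treat this as an outline of the pattern, not the driver logic.

/*
 * Sketch of the sleep/retry pattern (stand-in types; chip->wq and
 * chip->lock are assumed to have been initialised elsewhere).
 */
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_chip {                   /* minimal stand-in for struct flchip */
        wait_queue_head_t wq;
        spinlock_t lock;
};

static void example_wait_for_chip(struct example_chip *chip,
                                  int (*busy)(struct example_chip *))
{
        DECLARE_WAITQUEUE(wait, current);

retry:
        spin_lock(&chip->lock);
        if (busy(chip)) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(&chip->lock);
                schedule();             /* sleeps until wake_up(&chip->wq) */
                remove_wait_queue(&chip->wq, &wait);
                goto retry;
        }
        /* chip is ours here; do the operation, then wake any waiters */
        spin_unlock(&chip->lock);
}
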
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 389acc600f5e..d072e87ce4e2 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -185,6 +185,10 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
 		extp->TopBottom = 2;
 	else
 		extp->TopBottom = 3;
+
+	/* burst write mode not supported */
+	cfi->cfiq->BufWriteTimeoutTyp = 0;
+	cfi->cfiq->BufWriteTimeoutMax = 0;
 }
 
 static void fixup_use_secsi(struct mtd_info *mtd, void *param)
@@ -213,10 +217,11 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
 {
 	mtd->lock = cfi_atmel_lock;
 	mtd->unlock = cfi_atmel_unlock;
-	mtd->flags |= MTD_STUPID_LOCK;
+	mtd->flags |= MTD_POWERUP_LOCK;
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef AMD_BOOTLOC_BUG
 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
 #endif
@@ -229,7 +234,6 @@ static struct cfi_fixup cfi_fixup_table[] = {
 #if !FORCE_WORD_WRITE
 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
 #endif
-	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 	{ 0, 0, NULL, NULL }
 };
 static struct cfi_fixup jedec_fixup_table[] = {
@@ -338,10 +342,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 		/* Modify the unlock address if we are in compatibility mode */
 		if (	/* x16 in x8 mode */
 			((cfi->device_type == CFI_DEVICETYPE_X8) &&
-				(cfi->cfiq->InterfaceDesc == 2)) ||
+				(cfi->cfiq->InterfaceDesc ==
+					CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
 			/* x32 in x16 mode */
 			((cfi->device_type == CFI_DEVICETYPE_X16) &&
-				(cfi->cfiq->InterfaceDesc == 4)))
+				(cfi->cfiq->InterfaceDesc ==
+					CFI_INTERFACE_X16_BY_X32_ASYNC)))
 		{
 			cfi->addr_unlock1 = 0xaaa;
 			cfi->addr_unlock2 = 0x555;
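
Both command-set drivers above register the Atmel PRI conversion through a fixup table entry of the form { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }. The sketch below shows how such a table is typically walked: the probed manufacturer/device ID is matched against each entry, with the *_ANY values acting as wildcards, and the callback is invoked on a match. The struct and wildcard macros here are simplified stand-ins, not the CFI layer's own definitions.

/*
 * Illustrative fixup-table walk (simplified stand-in types).
 */
#include <linux/types.h>
#include <linux/mtd/mtd.h>

#define EX_MFR_ANY      0xffff          /* stand-ins for CFI_MFR_ANY / CFI_ID_ANY */
#define EX_ID_ANY       0xffff

struct example_fixup {
        uint16_t mfr;
        uint16_t id;
        void (*fixup)(struct mtd_info *mtd, void *param);
        void *param;
};

static void example_apply_fixups(struct mtd_info *mtd, uint16_t mfr,
                                 uint16_t id, struct example_fixup *table)
{
        struct example_fixup *f;

        /* table is terminated by an all-NULL entry, as in the diff above */
        for (f = table; f->fixup; f++) {
                if ((f->mfr == EX_MFR_ANY || f->mfr == mfr) &&
                    (f->id  == EX_ID_ANY  || f->id  == id))
                        f->fixup(mtd, f->param);
        }
}
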
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index 60e11a0ada97..f651b6ef1c5d 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -370,27 +370,27 @@ static void print_cfi_ident(struct cfi_ident *cfip)
 	printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
 	printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
 	switch(cfip->InterfaceDesc) {
-	case 0:
+	case CFI_INTERFACE_X8_ASYNC:
 		printk("  - x8-only asynchronous interface\n");
 		break;
 
-	case 1:
+	case CFI_INTERFACE_X16_ASYNC:
 		printk("  - x16-only asynchronous interface\n");
 		break;
 
-	case 2:
+	case CFI_INTERFACE_X8_BY_X16_ASYNC:
 		printk("  - supports x8 and x16 via BYTE# with asynchronous interface\n");
 		break;
 
-	case 3:
+	case CFI_INTERFACE_X32_ASYNC:
 		printk("  - x32-only asynchronous interface\n");
 		break;
 
-	case 4:
+	case CFI_INTERFACE_X16_BY_X32_ASYNC:
 		printk("  - supports x16 and x32 via Word# with asynchronous interface\n");
 		break;
 
-	case 65535:
+	case CFI_INTERFACE_NOT_ALLOWED:
 		printk("  - Not Allowed / Reserved\n");
 		break;
 
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 2eb696d7b97b..d338b8c92780 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -112,7 +112,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
 		max_chips = 1;
 	}
 
-	mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG;
+	mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG );
 	chip_map = kzalloc(mapsize, GFP_KERNEL);
 	if (!chip_map) {
 		printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
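
The one-line gen_probe.c change above is a buffer-size fix: chip_map is a bitmap with one bit per possible chip, but the old code handed kzalloc() the number of longs rather than the number of bytes, so for any max_chips up to BITS_PER_LONG it allocated a single byte and later bit operations wrote past it. A standalone arithmetic sketch follows, assuming a 64-bit long purely for the example; the macro and values are local to the sketch, not the kernel's.

/* Standalone demonstration of the size calculation. */
#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(long))

int main(void)
{
        unsigned long max_chips = 4;    /* e.g. a small probed chip count */
        size_t nlongs = (max_chips + BITS_PER_LONG - 1) / BITS_PER_LONG;

        /* old code: kzalloc(nlongs, GFP_KERNEL)                -> 1 byte  */
        /* fixed:    kzalloc(sizeof(long) * nlongs, GFP_KERNEL) -> 8 bytes */
        printf("longs needed: %zu, bytes needed: %zu\n",
               nlongs, sizeof(long) * nlongs);
        return 0;
}
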
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index a67b23b87fc0..4be51a86a85c 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -194,8 +194,8 @@ enum uaddr {
 
 
 struct unlock_addr {
-	u32 addr1;
-	u32 addr2;
+	uint32_t addr1;
+	uint32_t addr2;
 };
 
 
@@ -246,16 +246,16 @@ static const struct unlock_addr  unlock_addrs[] = {
 	}
 };
 
-
 struct amd_flash_info {
-	const __u16 mfr_id;
-	const __u16 dev_id;
 	const char *name;
-	const int DevSize;
-	const int NumEraseRegions;
-	const int CmdSet;
-	const __u8 uaddr[4];		/* unlock addrs for 8, 16, 32, 64 */
-	const ulong regions[6];
+	const uint16_t mfr_id;
+	const uint16_t dev_id;
+	const uint8_t dev_size;
+	const uint8_t nr_regions;
+	const uint16_t cmd_set;
+	const uint32_t regions[6];
+	const uint8_t devtypes;		/* Bitmask for x8, x16 etc. */
+	const uint8_t uaddr;		/* unlock addrs for 8, 16, 32, 64 */
 };
 
 #define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
@@ -280,12 +280,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F032B,
 		.name		= "AMD AM29F032B",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,64)
 		}
@@ -293,13 +292,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29LV160DT,
 		.name		= "AMD AM29LV160DT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA   /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x08000,1),
@@ -310,13 +307,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29LV160DB,
 		.name		= "AMD AM29LV160DB",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA   /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -327,13 +322,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29LV400BB,
 		.name		= "AMD AM29LV400BB",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -344,13 +337,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29LV400BT,
 		.name		= "AMD AM29LV400BT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,7),
 			ERASEINFO(0x08000,1),
@@ -361,13 +352,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29LV800BB,
 		.name		= "AMD AM29LV800BB",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -379,13 +368,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29DL800BB,
 		.name		= "AMD AM29DL800BB",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 6,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 6,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x08000,1),
@@ -398,13 +385,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29DL800BT,
 		.name		= "AMD AM29DL800BT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 6,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 6,
 		.regions	= {
 			ERASEINFO(0x10000,14),
 			ERASEINFO(0x04000,1),
@@ -417,13 +402,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F800BB,
 		.name		= "AMD AM29F800BB",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -434,13 +417,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29LV800BT,
 		.name		= "AMD AM29LV800BT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,15),
 			ERASEINFO(0x08000,1),
@@ -451,13 +432,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F800BT,
 		.name		= "AMD AM29F800BT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,15),
 			ERASEINFO(0x08000,1),
@@ -468,12 +447,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F017D,
 		.name		= "AMD AM29F017D",
-		.uaddr		= {
-			[0] = MTD_UADDR_DONT_CARE     /* x8 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_DONT_CARE,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,32),
 		}
@@ -481,12 +459,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F016D,
 		.name		= "AMD AM29F016D",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,32),
 		}
@@ -494,12 +471,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F080,
 		.name		= "AMD AM29F080",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,16),
 		}
@@ -507,12 +483,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F040,
 		.name		= "AMD AM29F040",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,8),
 		}
@@ -520,12 +495,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29LV040B,
 		.name		= "AMD AM29LV040B",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,8),
 		}
@@ -533,12 +507,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F002T,
 		.name		= "AMD AM29F002T",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_256KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,3),
 			ERASEINFO(0x08000,1),
@@ -549,12 +522,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ATMEL,
 		.dev_id		= AT49BV512,
 		.name		= "Atmel AT49BV512",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_64KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_64KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,1)
 		}
@@ -562,12 +534,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ATMEL,
 		.dev_id		= AT29LV512,
 		.name		= "Atmel AT29LV512",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_64KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_64KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x80,256),
 			ERASEINFO(0x80,256)
@@ -576,13 +547,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ATMEL,
 		.dev_id		= AT49BV16X,
 		.name		= "Atmel AT49BV16X",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x0AAA,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA   /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x0AAA,	/* ???? */
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,31)
@@ -591,13 +560,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ATMEL,
 		.dev_id		= AT49BV16XT,
 		.name		= "Atmel AT49BV16XT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x0AAA,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA   /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x0AAA,	/* ???? */
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x02000,8)
@@ -606,13 +573,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ATMEL,
 		.dev_id		= AT49BV32X,
 		.name		= "Atmel AT49BV32X",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x0AAA,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA   /* x16 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x0AAA,	/* ???? */
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,63)
@@ -621,13 +586,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ATMEL,
 		.dev_id		= AT49BV32XT,
 		.name		= "Atmel AT49BV32XT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x0AAA,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA   /* x16 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x0AAA,	/* ???? */
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000,63),
 			ERASEINFO(0x02000,8)
@@ -636,12 +599,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29F040C,
 		.name		= "Fujitsu MBM29F040C",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,8)
 		}
@@ -649,13 +611,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29F800BA,
 		.name		= "Fujitsu MBM29F800BA",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -666,12 +626,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV650UE,
 		.name		= "Fujitsu MBM29LV650UE",
-		.uaddr		= {
-			[0] = MTD_UADDR_DONT_CARE     /* x16 */
-		},
-		.DevSize	= SIZE_8MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_DONT_CARE,
+		.dev_size	= SIZE_8MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,128)
 		}
@@ -679,13 +638,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV320TE,
 		.name		= "Fujitsu MBM29LV320TE",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000,63),
 			ERASEINFO(0x02000,8)
@@ -694,13 +651,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV320BE,
 		.name		= "Fujitsu MBM29LV320BE",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,63)
@@ -709,13 +664,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV160TE,
 		.name		= "Fujitsu MBM29LV160TE",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x08000,1),
@@ -726,13 +679,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV160BE,
 		.name		= "Fujitsu MBM29LV160BE",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -743,13 +694,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV800BA,
 		.name		= "Fujitsu MBM29LV800BA",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -760,13 +709,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV800TA,
 		.name		= "Fujitsu MBM29LV800TA",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,15),
 			ERASEINFO(0x08000,1),
@@ -777,13 +724,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV400BC,
 		.name		= "Fujitsu MBM29LV400BC",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -794,13 +739,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_FUJITSU,
 		.dev_id		= MBM29LV400TC,
 		.name		= "Fujitsu MBM29LV400TC",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,7),
 			ERASEINFO(0x08000,1),
@@ -811,12 +754,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_HYUNDAI,
 		.dev_id		= HY29F002T,
 		.name		= "Hyundai HY29F002T",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_256KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,3),
 			ERASEINFO(0x08000,1),
@@ -827,12 +769,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F004B3B,
 		.name		= "Intel 28F004B3B",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 7),
@@ -841,12 +782,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F004B3T,
 		.name		= "Intel 28F004B3T",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000, 7),
 			ERASEINFO(0x02000, 8),
@@ -855,13 +795,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F400B3B,
 		.name		= "Intel 28F400B3B",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 7),
@@ -870,13 +808,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F400B3T,
 		.name		= "Intel 28F400B3T",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000, 7),
 			ERASEINFO(0x02000, 8),
@@ -885,12 +821,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F008B3B,
 		.name		= "Intel 28F008B3B",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 15),
@@ -899,12 +834,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F008B3T,
 		.name		= "Intel 28F008B3T",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000, 15),
 			ERASEINFO(0x02000, 8),
@@ -913,12 +847,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F008S5,
 		.name		= "Intel 28F008S5",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_EXT,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,16),
 		}
@@ -926,12 +859,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F016S5,
 		.name		= "Intel 28F016S5",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_INTEL_EXT,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,32),
 		}
@@ -939,12 +871,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F008SA,
 		.name		= "Intel 28F008SA",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000, 16),
 		}
@@ -952,12 +883,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F800B3B,
 		.name		= "Intel 28F800B3B",
-		.uaddr		= {
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 15),
@@ -966,12 +896,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F800B3T,
 		.name		= "Intel 28F800B3T",
-		.uaddr		= {
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000, 15),
 			ERASEINFO(0x02000, 8),
@@ -980,12 +909,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F016B3B,
 		.name		= "Intel 28F016B3B",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 31),
@@ -994,12 +922,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F016S3,
 		.name		= "Intel I28F016S3",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000, 32),
 		}
@@ -1007,12 +934,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F016B3T,
 		.name		= "Intel 28F016B3T",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000, 31),
 			ERASEINFO(0x02000, 8),
@@ -1021,12 +947,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F160B3B,
 		.name		= "Intel 28F160B3B",
-		.uaddr		= {
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 31),
@@ -1035,12 +960,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F160B3T,
 		.name		= "Intel 28F160B3T",
-		.uaddr		= {
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000, 31),
 			ERASEINFO(0x02000, 8),
@@ -1049,12 +973,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F320B3B,
 		.name		= "Intel 28F320B3B",
-		.uaddr		= {
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 63),
@@ -1063,12 +986,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F320B3T,
 		.name		= "Intel 28F320B3T",
-		.uaddr		= {
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000, 63),
 			ERASEINFO(0x02000, 8),
@@ -1077,12 +999,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F640B3B,
 		.name		= "Intel 28F640B3B",
-		.uaddr		= {
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_8MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_8MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 127),
@@ -1091,12 +1012,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F640B3T,
 		.name		= "Intel 28F640B3T",
-		.uaddr		= {
-			[1] = MTD_UADDR_UNNECESSARY,    /* x16 */
-		},
-		.DevSize	= SIZE_8MiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_8MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000, 127),
 			ERASEINFO(0x02000, 8),
@@ -1105,12 +1025,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I82802AB,
 		.name		= "Intel 82802AB",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_INTEL_EXT,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,8),
 		}
@@ -1118,12 +1037,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I82802AC,
 		.name		= "Intel 82802AC",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_EXT,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,16),
 		}
@@ -1131,12 +1049,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_MACRONIX,
 		.dev_id		= MX29LV040C,
 		.name		= "Macronix MX29LV040C",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA,  /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,8),
 		}
@@ -1144,13 +1061,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_MACRONIX,
 		.dev_id		= MX29LV160T,
 		.name		= "MXIC MX29LV160T",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x08000,1),
@@ -1161,13 +1076,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_NEC,
 		.dev_id		= UPD29F064115,
 		.name		= "NEC uPD29F064115",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_8MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 3,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,	/* ???? */
+		.dev_size	= SIZE_8MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 3,
 		.regions	= {
 			ERASEINFO(0x2000,8),
 			ERASEINFO(0x10000,126),
@@ -1177,13 +1090,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_MACRONIX,
 		.dev_id		= MX29LV160B,
 		.name		= "MXIC MX29LV160B",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -1194,12 +1105,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_MACRONIX,
 		.dev_id		= MX29F040,
 		.name		= "Macronix MX29F040",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,8),
 		}
@@ -1207,12 +1117,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_MACRONIX,
 		.dev_id		= MX29F016,
 		.name		= "Macronix MX29F016",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,32),
 		}
@@ -1220,12 +1129,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_MACRONIX,
 		.dev_id		= MX29F004T,
 		.name		= "Macronix MX29F004T",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,7),
 			ERASEINFO(0x08000,1),
@@ -1236,12 +1144,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_MACRONIX,
 		.dev_id		= MX29F004B,
 		.name		= "Macronix MX29F004B",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -1252,12 +1159,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_MACRONIX,
 		.dev_id		= MX29F002T,
 		.name		= "Macronix MX29F002T",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_256KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,3),
 			ERASEINFO(0x08000,1),
@@ -1268,12 +1174,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_PMC,
 		.dev_id		= PM49FL002,
 		.name		= "PMC Pm49FL002",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_256KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO( 0x01000, 64 )
 		}
@@ -1281,12 +1186,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_PMC,
 		.dev_id		= PM49FL004,
 		.name		= "PMC Pm49FL004",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO( 0x01000, 128 )
 		}
@@ -1294,12 +1198,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_PMC,
 		.dev_id		= PM49FL008,
 		.name		= "PMC Pm49FL008",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO( 0x01000, 256 )
 		}
@@ -1307,25 +1210,23 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SHARP,
 		.dev_id		= LH28F640BF,
 		.name		= "LH28F640BF",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet         = P_ID_INTEL_STD,
-		.NumEraseRegions= 1,
-		.regions        = {
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 1,
+		.regions	= {
 			ERASEINFO(0x40000,16),
 		}
         }, {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST39LF512,
 		.name		= "SST 39LF512",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_64KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_64KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,16),
 		}
@@ -1333,12 +1234,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST39LF010,
 		.name		= "SST 39LF010",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_128KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_128KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,32),
 		}
@@ -1346,36 +1246,33 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
  		.dev_id 	= SST29EE020,
 		.name		= "SST 29EE020",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
- 		.DevSize	= SIZE_256KiB,
- 		.CmdSet		= P_ID_SST_PAGE,
- 		.NumEraseRegions= 1,
- 		.regions = {ERASEINFO(0x01000,64),
- 		}
-         }, {
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_SST_PAGE,
+		.nr_regions	= 1,
+		.regions = {ERASEINFO(0x01000,64),
+		}
+	}, {
  		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST29LE020,
  		.name		= "SST 29LE020",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
- 		.DevSize	= SIZE_256KiB,
- 		.CmdSet		= P_ID_SST_PAGE,
- 		.NumEraseRegions= 1,
- 		.regions = {ERASEINFO(0x01000,64),
- 		}
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_SST_PAGE,
+		.nr_regions	= 1,
+		.regions = {ERASEINFO(0x01000,64),
+		}
 	}, {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST39LF020,
 		.name		= "SST 39LF020",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_256KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,64),
 		}
@@ -1383,12 +1280,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST39LF040,
 		.name		= "SST 39LF040",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,128),
 		}
@@ -1396,12 +1292,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST39SF010A,
 		.name		= "SST 39SF010A",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_128KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_128KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,32),
 		}
@@ -1409,26 +1304,24 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST39SF020A,
 		.name		= "SST 39SF020A",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_256KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,64),
 		}
 	}, {
 		.mfr_id		= MANUFACTURER_SST,
-		.dev_id         = SST49LF040B,
-		.name           = "SST 49LF040B",
-		.uaddr          = {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize        = SIZE_512KiB,
-		.CmdSet         = P_ID_AMD_STD,
-		.NumEraseRegions= 1,
-		.regions        = {
+		.dev_id		= SST49LF040B,
+		.name		= "SST 49LF040B",
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
+		.regions	= {
 			ERASEINFO(0x01000,128),
 		}
 	}, {
@@ -1436,12 +1329,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST49LF004B,
 		.name		= "SST 49LF004B",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,128),
 		}
@@ -1449,12 +1341,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST49LF008A,
 		.name		= "SST 49LF008A",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,256),
 		}
@@ -1462,12 +1353,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST49LF030A,
 		.name		= "SST 49LF030A",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,96),
 		}
@@ -1475,12 +1365,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST49LF040A,
 		.name		= "SST 49LF040A",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,128),
 		}
@@ -1488,57 +1377,49 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_SST,
 		.dev_id		= SST49LF080A,
 		.name		= "SST 49LF080A",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x01000,256),
 		}
 	}, {
-               .mfr_id         = MANUFACTURER_SST,     /* should be CFI */
-               .dev_id         = SST39LF160,
-               .name           = "SST 39LF160",
-               .uaddr          = {
-                       [0] = MTD_UADDR_0x5555_0x2AAA,  /* x8 */
-                       [1] = MTD_UADDR_0x5555_0x2AAA   /* x16 */
-               },
-               .DevSize        = SIZE_2MiB,
-               .CmdSet         = P_ID_AMD_STD,
-               .NumEraseRegions= 2,
-               .regions        = {
-                       ERASEINFO(0x1000,256),
-                       ERASEINFO(0x1000,256)
-               }
-	}, {
-               .mfr_id         = MANUFACTURER_SST,     /* should be CFI */
-               .dev_id         = SST39VF1601,
-               .name           = "SST 39VF1601",
-               .uaddr          = {
-                       [0] = MTD_UADDR_0x5555_0x2AAA,  /* x8 */
-                       [1] = MTD_UADDR_0x5555_0x2AAA   /* x16 */
-               },
-               .DevSize        = SIZE_2MiB,
-               .CmdSet         = P_ID_AMD_STD,
-               .NumEraseRegions= 2,
-               .regions        = {
-                       ERASEINFO(0x1000,256),
-                       ERASEINFO(0x1000,256)
-               }
-
+		.mfr_id		= MANUFACTURER_SST,     /* should be CFI */
+		.dev_id		= SST39LF160,
+		.name		= "SST 39LF160",
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,	/* ???? */
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
+		.regions	= {
+			ERASEINFO(0x1000,256),
+			ERASEINFO(0x1000,256)
+		}
+	}, {
+		.mfr_id		= MANUFACTURER_SST,     /* should be CFI */
+		.dev_id		= SST39VF1601,
+		.name		= "SST 39VF1601",
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,	/* ???? */
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
+		.regions	= {
+			ERASEINFO(0x1000,256),
+			ERASEINFO(0x1000,256)
+		}
 	}, {
 		.mfr_id		= MANUFACTURER_ST,
 		.dev_id		= M29F800AB,
 		.name		= "ST M29F800AB",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -1549,13 +1430,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,	/* FIXME - CFI device? */
 		.dev_id		= M29W800DT,
 		.name		= "ST M29W800DT",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA,  /* x8 */
-			[1] = MTD_UADDR_0x5555_0x2AAA   /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,	/* ???? */
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,15),
 			ERASEINFO(0x08000,1),
@@ -1566,13 +1445,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,	/* FIXME - CFI device? */
 		.dev_id		= M29W800DB,
 		.name		= "ST M29W800DB",
- 		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA,  /* x8 */
-			[1] = MTD_UADDR_0x5555_0x2AAA   /* x16 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,	/* ???? */
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -1583,13 +1460,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,	/* FIXME - CFI device? */
 		.dev_id		= M29W160DT,
 		.name		= "ST M29W160DT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,	/* ???? */
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x08000,1),
@@ -1600,13 +1475,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,	/* FIXME - CFI device? */
 		.dev_id		= M29W160DB,
 		.name		= "ST M29W160DB",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA,  /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA,  /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,	/* ???? */
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -1617,12 +1490,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,
 		.dev_id		= M29W040B,
 		.name		= "ST M29W040B",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,8),
 		}
@@ -1630,12 +1502,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,
 		.dev_id		= M50FW040,
 		.name		= "ST M50FW040",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_INTEL_EXT,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,8),
 		}
@@ -1643,12 +1514,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,
 		.dev_id		= M50FW080,
 		.name		= "ST M50FW080",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_EXT,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,16),
 		}
@@ -1656,12 +1526,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,
 		.dev_id		= M50FW016,
 		.name		= "ST M50FW016",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_INTEL_EXT,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,32),
 		}
@@ -1669,12 +1538,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_ST,
 		.dev_id		= M50LPW080,
 		.name		= "ST M50LPW080",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,    /* x8 */
-		},
-		.DevSize	= SIZE_1MiB,
-		.CmdSet		= P_ID_INTEL_EXT,
-		.NumEraseRegions= 1,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_1MiB,
+		.cmd_set	= P_ID_INTEL_EXT,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,16),
 		}
@@ -1682,13 +1550,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_TOSHIBA,
 		.dev_id		= TC58FVT160,
 		.name		= "Toshiba TC58FVT160",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA  /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x08000,1),
@@ -1699,13 +1565,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_TOSHIBA,
 		.dev_id		= TC58FVB160,
 		.name		= "Toshiba TC58FVB160",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA  /* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x04000,1),
 			ERASEINFO(0x02000,2),
@@ -1716,13 +1580,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_TOSHIBA,
 		.dev_id		= TC58FVB321,
 		.name		= "Toshiba TC58FVB321",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA  /* x16 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,63)
@@ -1731,13 +1593,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_TOSHIBA,
 		.dev_id		= TC58FVT321,
 		.name		= "Toshiba TC58FVT321",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA  /* x16 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000,63),
 			ERASEINFO(0x02000,8)
@@ -1746,13 +1606,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_TOSHIBA,
 		.dev_id		= TC58FVB641,
 		.name		= "Toshiba TC58FVB641",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize	= SIZE_8MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_8MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,127)
@@ -1761,13 +1619,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_TOSHIBA,
 		.dev_id		= TC58FVT641,
 		.name		= "Toshiba TC58FVT641",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
-		},
-		.DevSize	= SIZE_8MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_8MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x10000,127),
 			ERASEINFO(0x02000,8)
@@ -1776,12 +1632,11 @@ static const struct amd_flash_info jedec_table[] = {
 		.mfr_id		= MANUFACTURER_WINBOND,
 		.dev_id		= W49V002A,
 		.name		= "Winbond W49V002A",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
-		},
-		.DevSize	= SIZE_256KiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x5555_0x2AAA,
+		.dev_size	= SIZE_256KiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000, 3),
 			ERASEINFO(0x08000, 1),
@@ -1791,15 +1646,7 @@ static const struct amd_flash_info jedec_table[] = {
 	}
 };
 
-
-static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
-
-static int jedec_probe_chip(struct map_info *map, __u32 base,
-			    unsigned long *chip_map, struct cfi_private *cfi);
-
-static struct mtd_info *jedec_probe(struct map_info *map);
-
-static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
+static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
 	struct cfi_private *cfi)
 {
 	map_word result;
@@ -1810,7 +1657,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
 	return result.x[0] & mask;
 }
 
-static inline u32 jedec_read_id(struct map_info *map, __u32 base,
+static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
 	struct cfi_private *cfi)
 {
 	map_word result;
@@ -1821,8 +1668,7 @@ static inline u32 jedec_read_id(struct map_info *map, __u32 base,
 	return result.x[0] & mask;
 }
 
-static inline void jedec_reset(u32 base, struct map_info *map,
-	struct cfi_private *cfi)
+static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
 {
 	/* Reset */
 
@@ -1832,7 +1678,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
 	 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
 	 * as they will ignore the writes and dont care what address
 	 * the F0 is written to */
-	if(cfi->addr_unlock1) {
+	if (cfi->addr_unlock1) {
 		DEBUG( MTD_DEBUG_LEVEL3,
 		       "reset unlock called %x %x \n",
 		       cfi->addr_unlock1,cfi->addr_unlock2);
@@ -1841,7 +1687,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
 	}
 
 	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
-	/* Some misdesigned intel chips do not respond for 0xF0 for a reset,
+	/* Some misdesigned Intel chips do not respond to 0xF0 for a reset,
 	 * so ensure we're in read mode.  Send both the Intel and the AMD command
 	 * for this.  Intel uses 0xff for this, AMD uses 0xff for NOP, so
 	 * this should be safe.
@@ -1851,42 +1697,20 @@ static inline void jedec_reset(u32 base, struct map_info *map,
 }
 
 
-static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type)
-{
-	int uaddr_idx;
-	__u8 uaddr = MTD_UADDR_NOT_SUPPORTED;
-
-	switch ( device_type ) {
-	case CFI_DEVICETYPE_X8:  uaddr_idx = 0; break;
-	case CFI_DEVICETYPE_X16: uaddr_idx = 1; break;
-	case CFI_DEVICETYPE_X32: uaddr_idx = 2; break;
-	default:
-		printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n",
-		       __func__, device_type);
-		goto uaddr_done;
-	}
-
-	uaddr = finfo->uaddr[uaddr_idx];
-
-	if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
-		/* ASSERT("The unlock addresses for non-8-bit mode
-		   are bollocks. We don't really need an array."); */
-		uaddr = finfo->uaddr[0];
-	}
-
- uaddr_done:
-	return uaddr;
-}
-
-
 static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
 {
 	int i,num_erase_regions;
-	__u8 uaddr;
+	uint8_t uaddr;
 
-	printk("Found: %s\n",jedec_table[index].name);
+	if (!(jedec_table[index].devtypes & p_cfi->device_type)) {
+		DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
+		      jedec_table[index].name, 8 * p_cfi->device_type);
+		return 0;
+	}
+
+	printk(KERN_INFO "Found: %s\n",jedec_table[index].name);
 
-	num_erase_regions = jedec_table[index].NumEraseRegions;
+	num_erase_regions = jedec_table[index].nr_regions;
 
 	p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
 	if (!p_cfi->cfiq) {
@@ -1896,9 +1720,9 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
 
 	memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
 
-	p_cfi->cfiq->P_ID = jedec_table[index].CmdSet;
-	p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
-	p_cfi->cfiq->DevSize = jedec_table[index].DevSize;
+	p_cfi->cfiq->P_ID = jedec_table[index].cmd_set;
+	p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
+	p_cfi->cfiq->DevSize = jedec_table[index].dev_size;
 	p_cfi->cfi_mode = CFI_MODE_JEDEC;
 
 	for (i=0; i<num_erase_regions; i++){
@@ -1910,14 +1734,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
 	p_cfi->mfr = jedec_table[index].mfr_id;
 	p_cfi->id = jedec_table[index].dev_id;
 
-	uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type);
-	if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
-		kfree( p_cfi->cfiq );
-		return 0;
-	}
+	uaddr = jedec_table[index].uaddr;
 
-	p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1;
-	p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2;
+	/* The table has unlock addresses in _bytes_, and we try not to let
+	   our brains explode when we see the datasheets talking about address
+	   lines numbered from A-1 to A18. The CFI table has unlock addresses
+	   in device-words according to the mode the device is connected in */
+	p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
+	p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
 
 	return 1; 	/* ok */
 }
@@ -1930,14 +1754,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
  * be perfect - consequently there should be some module parameters that
  * could be manually specified to force the chip info.
  */
-static inline int jedec_match( __u32 base,
+static inline int jedec_match( uint32_t base,
 			       struct map_info *map,
 			       struct cfi_private *cfi,
 			       const struct amd_flash_info *finfo )
 {
 	int rc = 0;           /* failure until all tests pass */
 	u32 mfr, id;
-	__u8 uaddr;
+	uint8_t uaddr;
 
 	/*
 	 * The IDs must match.  For X16 and X32 devices operating in
@@ -1950,8 +1774,8 @@ static inline int jedec_match( __u32 base,
 	 */
 	switch (cfi->device_type) {
 	case CFI_DEVICETYPE_X8:
-		mfr = (__u8)finfo->mfr_id;
-		id = (__u8)finfo->dev_id;
+		mfr = (uint8_t)finfo->mfr_id;
+		id = (uint8_t)finfo->dev_id;
 
 		/* bjd: it seems that if we do this, we can end up
 		 * detecting 16bit flashes as an 8bit device, even though
@@ -1964,12 +1788,12 @@ static inline int jedec_match( __u32 base,
 		}
 		break;
 	case CFI_DEVICETYPE_X16:
-		mfr = (__u16)finfo->mfr_id;
-		id = (__u16)finfo->dev_id;
+		mfr = (uint16_t)finfo->mfr_id;
+		id = (uint16_t)finfo->dev_id;
 		break;
 	case CFI_DEVICETYPE_X32:
-		mfr = (__u16)finfo->mfr_id;
-		id = (__u32)finfo->dev_id;
+		mfr = (uint16_t)finfo->mfr_id;
+		id = (uint32_t)finfo->dev_id;
 		break;
 	default:
 		printk(KERN_WARNING
@@ -1984,25 +1808,25 @@ static inline int jedec_match( __u32 base,
 	/* the part size must fit in the memory window */
 	DEBUG( MTD_DEBUG_LEVEL3,
 	       "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
-	       __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) );
-	if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) {
+	       __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
+	if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
 		DEBUG( MTD_DEBUG_LEVEL3,
 		       "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
 		       __func__, finfo->mfr_id, finfo->dev_id,
-		       1 << finfo->DevSize );
+		       1 << finfo->dev_size );
 		goto match_done;
 	}
 
-	uaddr = finfo_uaddr(finfo, cfi->device_type);
-	if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
+	if (!(finfo->devtypes & cfi->device_type))
 		goto match_done;
-	}
+
+	uaddr = finfo->uaddr;
 
 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
 	       __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
 	if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
-	     && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 ||
-		  unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) {
+	     && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
+		  unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
 		DEBUG( MTD_DEBUG_LEVEL3,
 			"MTD %s(): 0x%.4x 0x%.4x did not match\n",
 			__func__,
@@ -2042,7 +1866,7 @@ static inline int jedec_match( __u32 base,
 	 * were truly frobbing a real device.
 	 */
 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
-	if(cfi->addr_unlock1) {
+	if (cfi->addr_unlock1) {
 		cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
 		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
 	}
@@ -2068,8 +1892,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
 		if (MTD_UADDR_UNNECESSARY == uaddr_idx)
 			return 0;
 
-		cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1;
-		cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2;
+		cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
+		cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
 	}
 
 	/* Make certain we aren't probing past the end of map */
@@ -2081,19 +1905,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
 
 	}
 	/* Ensure the unlock addresses we try stay inside the map */
-	probe_offset1 = cfi_build_cmd_addr(
-		cfi->addr_unlock1,
-		cfi_interleave(cfi),
-		cfi->device_type);
-	probe_offset2 = cfi_build_cmd_addr(
-		cfi->addr_unlock1,
-		cfi_interleave(cfi),
-		cfi->device_type);
+	probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type);
+	probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type);
 	if (	((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
 		((base + probe_offset2 + map_bankwidth(map)) >= map->size))
-	{
 		goto retry;
-	}
 
 	/* Reset */
 	jedec_reset(base, map, cfi);
@@ -2128,8 +1944,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
 		}
 		goto retry;
 	} else {
-		__u16 mfr;
-		__u16 id;
+		uint16_t mfr;
+		uint16_t id;
 
 		/* Make sure it is a chip of the same manufacturer and id */
 		mfr = jedec_read_mfr(map, base, cfi);
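The reworked cfi_jedec_setup() above keeps unlock addresses in the table as byte offsets and divides them by the device width to obtain device-word addresses. A minimal user-space sketch of that conversion (the struct and table entry here are illustrative stand-ins, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

/* Device width in bytes, mirroring CFI_DEVICETYPE_X8/X16/X32 */
enum devwidth { X8 = 1, X16 = 2, X32 = 4 };

struct unlock_addr {            /* illustrative stand-in for unlock_addrs[] */
	uint32_t addr1;         /* byte offset of the first unlock write    */
	uint32_t addr2;         /* byte offset of the second unlock write   */
};

/* Scale the table's byte offsets down to device-word addresses */
static void setup_unlock(const struct unlock_addr *u, enum devwidth width,
			 uint32_t *unlock1, uint32_t *unlock2)
{
	*unlock1 = u->addr1 / width;
	*unlock2 = u->addr2 / width;
}

int main(void)
{
	struct unlock_addr amd = { 0x0aaa, 0x0555 };  /* MTD_UADDR_0x0AAA_0x0555 */
	uint32_t u1, u2;

	setup_unlock(&amd, X16, &u1, &u2);
	printf("x16 unlock addresses: 0x%x 0x%x\n", (unsigned)u1, (unsigned)u2);
	return 0;
}

For an x16 part the AMD-style 0x0AAA/0x0555 byte offsets become the familiar 0x555/0x2AA word addresses.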
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 23fab14f1637..b44292abd9f7 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -9,7 +9,7 @@
  *
  * mtdparts=<mtddef>[;<mtddef]
  * <mtddef>  := <mtd-id>:<partdef>[,<partdef>]
- * <partdef> := <size>[@offset][<name>][ro]
+ * <partdef> := <size>[@offset][<name>][ro][lk]
  * <mtd-id>  := unique name used in mapping driver/device (mtd->name)
  * <size>    := standard linux memsize OR "-" to denote all remaining space
  * <name>    := '(' NAME ')'
@@ -143,6 +143,13 @@ static struct mtd_partition * newpart(char *s,
 		s += 2;
         }
 
+	/* if lk is found do NOT unlock the MTD partition */
+	if (strncmp(s, "lk", 2) == 0)
+	{
+		mask_flags |= MTD_POWERUP_LOCK;
+		s += 2;
+	}
+
 	/* test if more partitions are following */
 	if (*s == ',')
 	{
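The hunk above adds an optional "lk" suffix to the mtdparts grammar, e.g. mtdparts=physmap-flash.0:512k(boot)rolk,-(rootfs) (a hypothetical board string), which leaves the chip's power-up lock in place for that partition. A small stand-alone sketch of the suffix parsing; the flag values below are illustrative, not the kernel's:

#include <stdio.h>
#include <string.h>

#define MTD_WRITEABLE     0x400   /* illustrative flag values */
#define MTD_POWERUP_LOCK  0x800

/* Parse the optional "ro"/"lk" suffixes of one <partdef> token and return
 * the flags to mask off the partition: "ro" strips write access, "lk"
 * strips the power-up-lock flag so the core skips the automatic unlock. */
static unsigned parse_suffix_flags(const char **s)
{
	unsigned mask = 0;

	if (strncmp(*s, "ro", 2) == 0) {
		mask |= MTD_WRITEABLE;
		*s += 2;
	}
	if (strncmp(*s, "lk", 2) == 0) {
		mask |= MTD_POWERUP_LOCK;
		*s += 2;
	}
	return mask;
}

int main(void)
{
	const char *rest = "rolk,-(rootfs)";
	unsigned mask = parse_suffix_flags(&rest);

	printf("mask_flags = %#x, rest = \"%s\"\n", mask, rest);
	return 0;
}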
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 90acf57c19bd..846989f292e3 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -632,7 +632,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
 			len = ((from | 0x1ff) + 1) - from;
 
 		/* The ECC will not be calculated correctly if less than 512 is read */
-		if (len != 0x200 && eccbuf)
+		if (len != 0x200)
 			printk(KERN_WARNING
 			       "ECC needs a full sector read (adr: %lx size %lx)\n",
 			       (long) from, (long) len);
@@ -896,7 +896,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
 		/* Let the caller know we completed it */
 		*retlen += len;
 
-		if (eccbuf) {
+		{
 			unsigned char x[8];
 			size_t dummy;
 			int ret;
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 2b30b587c6e8..83be3461658f 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -748,7 +748,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
 	WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
 
 	/* On interleaved devices the flags for 2nd half 512 are before data */
-	if (eccbuf && before)
+	if (before)
 		fto -= 2;
 
 	/* issue the Serial Data In command to initial the Page Program process */
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4ea50a1dda85..99fd210feaec 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -323,7 +323,7 @@ static int flash_probe (void)
    /* put the flash back into command mode */
    write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
 
-   return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM));
+   return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
 }
 
 /*
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a5ed6d232c35..b35e4813a3a5 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -420,7 +420,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 		status = dataflash_waitready(priv->spi);
 
 		/* Check result of the compare operation */
-		if ((status & (1 << 6)) == 1) {
+		if (status & (1 << 6)) {
 			printk(KERN_ERR "%s: compare page %u, err %d\n",
 				spi->dev.bus_id, pageaddr, status);
 			remaining = 0;
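The mtd_dataflash change above fixes a classic bit-test bug: the masked status value is either 0 or 0x40, so comparing it against 1 can never be true. A tiny demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char status = 0x40;   /* compare-mismatch bit (bit 6) set */

	/* Old test: (status & (1 << 6)) evaluates to 0x40, never to 1 */
	if ((status & (1 << 6)) == 1)
		printf("old test: mismatch detected\n");   /* never prints */

	/* New test: any non-zero masked value means a mismatch */
	if (status & (1 << 6))
		printf("new test: mismatch detected\n");   /* prints */

	return 0;
}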
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index a592fc04cf78..12c253664eb2 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -110,13 +110,6 @@ config MTD_SUN_UFLASH
 	  Sun Microsystems boardsets.  This driver will require CFI support
 	  in the kernel, so if you did not enable CFI previously, do that now.
 
-config MTD_PNC2000
-	tristate "CFI Flash device mapped on Photron PNC-2000"
-	depends on X86 && MTD_CFI && MTD_PARTITIONS
-	help
-	  PNC-2000 is the name of Network Camera product from PHOTRON
-	  Ltd. in Japan. It uses CFI-compliant flash.
-
 config MTD_SC520CDP
 	tristate "CFI Flash device mapped on AMD SC520 CDP"
 	depends on X86 && MTD_CFI && MTD_CONCAT
@@ -576,7 +569,7 @@ config MTD_BAST_MAXSIZE
 	default "4"
 
 config MTD_SHARP_SL
-	bool "ROM mapped on Sharp SL Series"
+	tristate "ROM mapped on Sharp SL Series"
 	depends on ARCH_PXA
 	help
 	  This enables access to the flash chip on the Sharp SL Series of PDAs.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 316382a1401b..a9cbe80f99a0 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_MTD_PHYSMAP)	+= physmap.o
 obj-$(CONFIG_MTD_PHYSMAP_OF)	+= physmap_of.o
 obj-$(CONFIG_MTD_PMC_MSP_EVM)   += pmcmsp-flash.o
 obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
-obj-$(CONFIG_MTD_PNC2000)	+= pnc2000.o
 obj-$(CONFIG_MTD_PCMCIA)	+= pcmciamtd.o
 obj-$(CONFIG_MTD_RPXLITE)	+= rpxlite.o
 obj-$(CONFIG_MTD_TQM8XXL)	+= tqm8xxl.o
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 28c5ffd75233..f00e04efbe28 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -20,11 +20,15 @@
 #include <linux/mtd/map.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
+#include <linux/mtd/concat.h>
 #include <asm/io.h>
 
+#define MAX_RESOURCES		4
+
 struct physmap_flash_info {
-	struct mtd_info		*mtd;
-	struct map_info		map;
+	struct mtd_info		*mtd[MAX_RESOURCES];
+	struct mtd_info		*cmtd;
+	struct map_info		map[MAX_RESOURCES];
 	struct resource		*res;
 #ifdef CONFIG_MTD_PARTITIONS
 	int			nr_parts;
@@ -32,11 +36,11 @@ struct physmap_flash_info {
 #endif
 };
 
-
 static int physmap_flash_remove(struct platform_device *dev)
 {
 	struct physmap_flash_info *info;
 	struct physmap_flash_data *physmap_data;
+	int i;
 
 	info = platform_get_drvdata(dev);
 	if (info == NULL)
@@ -45,24 +49,33 @@ static int physmap_flash_remove(struct platform_device *dev)
 
 	physmap_data = dev->dev.platform_data;
 
-	if (info->mtd != NULL) {
+#ifdef CONFIG_MTD_CONCAT
+	if (info->cmtd != info->mtd[0]) {
+		del_mtd_device(info->cmtd);
+		mtd_concat_destroy(info->cmtd);
+	}
+#endif
+
+	for (i = 0; i < MAX_RESOURCES; i++) {
+		if (info->mtd[i] != NULL) {
 #ifdef CONFIG_MTD_PARTITIONS
-		if (info->nr_parts) {
-			del_mtd_partitions(info->mtd);
-			kfree(info->parts);
-		} else if (physmap_data->nr_parts) {
-			del_mtd_partitions(info->mtd);
-		} else {
-			del_mtd_device(info->mtd);
-		}
+			if (info->nr_parts) {
+				del_mtd_partitions(info->mtd[i]);
+				kfree(info->parts);
+			} else if (physmap_data->nr_parts) {
+				del_mtd_partitions(info->mtd[i]);
+			} else {
+				del_mtd_device(info->mtd[i]);
+			}
 #else
-		del_mtd_device(info->mtd);
+			del_mtd_device(info->mtd[i]);
 #endif
-		map_destroy(info->mtd);
-	}
+			map_destroy(info->mtd[i]);
+		}
 
-	if (info->map.virt != NULL)
-		iounmap(info->map.virt);
+		if (info->map[i].virt != NULL)
+			iounmap(info->map[i].virt);
+	}
 
 	if (info->res != NULL) {
 		release_resource(info->res);
@@ -82,16 +95,14 @@ static int physmap_flash_probe(struct platform_device *dev)
 	struct physmap_flash_data *physmap_data;
 	struct physmap_flash_info *info;
 	const char **probe_type;
-	int err;
+	int err = 0;
+	int i;
+	int devices_found = 0;
 
 	physmap_data = dev->dev.platform_data;
 	if (physmap_data == NULL)
 		return -ENODEV;
 
-       	printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
-	    (unsigned long long)(dev->resource->end - dev->resource->start + 1),
-	    (unsigned long long)dev->resource->start);
-
 	info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
 	if (info == NULL) {
 		err = -ENOMEM;
@@ -100,56 +111,83 @@ static int physmap_flash_probe(struct platform_device *dev)
 
 	platform_set_drvdata(dev, info);
 
-	info->res = request_mem_region(dev->resource->start,
-			dev->resource->end - dev->resource->start + 1,
-			dev->dev.bus_id);
-	if (info->res == NULL) {
-		dev_err(&dev->dev, "Could not reserve memory region\n");
-		err = -ENOMEM;
-		goto err_out;
-	}
+	for (i = 0; i < dev->num_resources; i++) {
+		printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
+		       (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
+		       (unsigned long long)dev->resource[i].start);
+
+		info->res = request_mem_region(dev->resource[i].start,
+					       dev->resource[i].end - dev->resource[i].start + 1,
+					       dev->dev.bus_id);
+		if (info->res == NULL) {
+			dev_err(&dev->dev, "Could not reserve memory region\n");
+			err = -ENOMEM;
+			goto err_out;
+		}
 
-	info->map.name = dev->dev.bus_id;
-	info->map.phys = dev->resource->start;
-	info->map.size = dev->resource->end - dev->resource->start + 1;
-	info->map.bankwidth = physmap_data->width;
-	info->map.set_vpp = physmap_data->set_vpp;
+		info->map[i].name = dev->dev.bus_id;
+		info->map[i].phys = dev->resource[i].start;
+		info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
+		info->map[i].bankwidth = physmap_data->width;
+		info->map[i].set_vpp = physmap_data->set_vpp;
+
+		info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
+		if (info->map[i].virt == NULL) {
+			dev_err(&dev->dev, "Failed to ioremap flash region\n");
+			err = -EIO;
+			goto err_out;
+		}
 
-	info->map.virt = ioremap(info->map.phys, info->map.size);
-	if (info->map.virt == NULL) {
-		dev_err(&dev->dev, "Failed to ioremap flash region\n");
-		err = EIO;
-		goto err_out;
-	}
+		simple_map_init(&info->map[i]);
 
-	simple_map_init(&info->map);
+		probe_type = rom_probe_types;
+		for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++)
+			info->mtd[i] = do_map_probe(*probe_type, &info->map[i]);
+		if (info->mtd[i] == NULL) {
+			dev_err(&dev->dev, "map_probe failed\n");
+			err = -ENXIO;
+			goto err_out;
+		} else {
+			devices_found++;
+		}
+		info->mtd[i]->owner = THIS_MODULE;
+	}
 
-	probe_type = rom_probe_types;
-	for (; info->mtd == NULL && *probe_type != NULL; probe_type++)
-		info->mtd = do_map_probe(*probe_type, &info->map);
-	if (info->mtd == NULL) {
-		dev_err(&dev->dev, "map_probe failed\n");
+	if (devices_found == 1) {
+		info->cmtd = info->mtd[0];
+	} else if (devices_found > 1) {
+		/*
+		 * We detected multiple devices. Concatenate them together.
+		 */
+#ifdef CONFIG_MTD_CONCAT
+		info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id);
+		if (info->cmtd == NULL)
+			err = -ENXIO;
+#else
+		printk(KERN_ERR "physmap-flash: multiple devices "
+		       "found but MTD concat support disabled.\n");
 		err = -ENXIO;
-		goto err_out;
+#endif
 	}
-	info->mtd->owner = THIS_MODULE;
+	if (err)
+		goto err_out;
 
 #ifdef CONFIG_MTD_PARTITIONS
-	err = parse_mtd_partitions(info->mtd, part_probe_types, &info->parts, 0);
+	err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0);
 	if (err > 0) {
-		add_mtd_partitions(info->mtd, info->parts, err);
+		add_mtd_partitions(info->cmtd, info->parts, err);
 		return 0;
 	}
 
 	if (physmap_data->nr_parts) {
 		printk(KERN_NOTICE "Using physmap partition information\n");
-		add_mtd_partitions(info->mtd, physmap_data->parts,
-						physmap_data->nr_parts);
+		add_mtd_partitions(info->cmtd, physmap_data->parts,
+				   physmap_data->nr_parts);
 		return 0;
 	}
 #endif
 
-	add_mtd_device(info->mtd);
+	add_mtd_device(info->cmtd);
 	return 0;
 
 err_out:
@@ -162,9 +200,11 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
 {
 	struct physmap_flash_info *info = platform_get_drvdata(dev);
 	int ret = 0;
+	int i;
 
 	if (info)
-		ret = info->mtd->suspend(info->mtd);
+		for (i = 0; i < MAX_RESOURCES; i++)
+			if (info->mtd[i])
+				ret |= info->mtd[i]->suspend(info->mtd[i]);
 
 	return ret;
 }
@@ -172,27 +212,35 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
 static int physmap_flash_resume(struct platform_device *dev)
 {
 	struct physmap_flash_info *info = platform_get_drvdata(dev);
+	int i;
+
 	if (info)
-		info->mtd->resume(info->mtd);
+		for (i = 0; i < MAX_RESOURCES; i++)
+			if (info->mtd[i])
+				info->mtd[i]->resume(info->mtd[i]);
 	return 0;
 }
 
 static void physmap_flash_shutdown(struct platform_device *dev)
 {
 	struct physmap_flash_info *info = platform_get_drvdata(dev);
-	if (info && info->mtd->suspend(info->mtd) == 0)
-		info->mtd->resume(info->mtd);
+	int i;
+
+	for (i = 0; i < MAX_RESOURCES; i++)
+		if (info && info->mtd[i] && info->mtd[i]->suspend(info->mtd[i]) == 0)
+			info->mtd[i]->resume(info->mtd[i]);
 }
+#else
+#define physmap_flash_suspend NULL
+#define physmap_flash_resume NULL
+#define physmap_flash_shutdown NULL
 #endif
 
 static struct platform_driver physmap_flash_driver = {
 	.probe		= physmap_flash_probe,
 	.remove		= physmap_flash_remove,
-#ifdef CONFIG_PM
 	.suspend	= physmap_flash_suspend,
 	.resume		= physmap_flash_resume,
 	.shutdown	= physmap_flash_shutdown,
-#endif
 	.driver		= {
 		.name	= "physmap-flash",
 	},
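physmap now probes up to MAX_RESOURCES windows and, when more than one chip is found, registers them as a single logical device via MTD concatenation. A toy user-space model of that decision flow (probe_one() and concat_create() are hypothetical stand-ins for do_map_probe() and mtd_concat_create()):

#include <stdio.h>
#include <stddef.h>

#define MAX_RESOURCES 4

struct mtd { const char *name; };   /* stand-in for struct mtd_info */

/* Hypothetical stand-ins for do_map_probe() and mtd_concat_create() */
static struct mtd *probe_one(int i)
{
	static struct mtd chips[MAX_RESOURCES] = {
		{ "chip0" }, { "chip1" }, { NULL }, { NULL }
	};
	return chips[i].name ? &chips[i] : NULL;
}

static struct mtd concat = { "physmap-concat" };

static struct mtd *concat_create(struct mtd **sub, int n)
{
	(void)sub; (void)n;
	return &concat;
}

int main(void)
{
	struct mtd *mtd[MAX_RESOURCES] = { NULL };
	struct mtd *cmtd = NULL;
	int i, found = 0;

	for (i = 0; i < MAX_RESOURCES; i++) {
		mtd[i] = probe_one(i);            /* one map probe per resource */
		if (mtd[i])
			found++;
	}

	if (found == 1)
		cmtd = mtd[0];                    /* single chip: register it directly */
	else if (found > 1)
		cmtd = concat_create(mtd, found); /* several chips: one logical device */

	printf("registering %s\n", cmtd ? cmtd->name : "(nothing)");
	return 0;
}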
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index aeed9ea79714..49acd4171893 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -80,64 +80,6 @@ static int parse_obsolete_partitions(struct of_device *dev,
 
 	return nr_parts;
 }
-
-static int __devinit parse_partitions(struct of_flash *info,
-				      struct of_device *dev)
-{
-	const char *partname;
-	static const char *part_probe_types[]
-		= { "cmdlinepart", "RedBoot", NULL };
-	struct device_node *dp = dev->node, *pp;
-	int nr_parts, i;
-
-	/* First look for RedBoot table or partitions on the command
-	 * line, these take precedence over device tree information */
-	nr_parts = parse_mtd_partitions(info->mtd, part_probe_types,
-					&info->parts, 0);
-	if (nr_parts > 0) {
-		add_mtd_partitions(info->mtd, info->parts, nr_parts);
-		return 0;
-	}
-
-	/* First count the subnodes */
-	nr_parts = 0;
-	for (pp = dp->child; pp; pp = pp->sibling)
-		nr_parts++;
-
-	if (nr_parts == 0)
-		return parse_obsolete_partitions(dev, info, dp);
-
-	info->parts = kzalloc(nr_parts * sizeof(*info->parts),
-			      GFP_KERNEL);
-	if (!info->parts)
-		return -ENOMEM;
-
-	for (pp = dp->child, i = 0; pp; pp = pp->sibling, i++) {
-		const u32 *reg;
-		int len;
-
-		reg = of_get_property(pp, "reg", &len);
-		if (!reg || (len != 2*sizeof(u32))) {
-			dev_err(&dev->dev, "Invalid 'reg' on %s\n",
-				dp->full_name);
-			kfree(info->parts);
-			info->parts = NULL;
-			return -EINVAL;
-		}
-		info->parts[i].offset = reg[0];
-		info->parts[i].size = reg[1];
-
-		partname = of_get_property(pp, "label", &len);
-		if (!partname)
-			partname = of_get_property(pp, "name", &len);
-		info->parts[i].name = (char *)partname;
-
-		if (of_get_property(pp, "read-only", &len))
-			info->parts[i].mask_flags = MTD_WRITEABLE;
-	}
-
-	return nr_parts;
-}
 #else /* MTD_PARTITIONS */
 #define	OF_FLASH_PARTS(info)		(0)
 #define parse_partitions(info, dev)	(0)
@@ -212,6 +154,10 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
 static int __devinit of_flash_probe(struct of_device *dev,
 				    const struct of_device_id *match)
 {
+#ifdef CONFIG_MTD_PARTITIONS
+	static const char *part_probe_types[]
+		= { "cmdlinepart", "RedBoot", NULL };
+#endif
 	struct device_node *dp = dev->node;
 	struct resource res;
 	struct of_flash *info;
@@ -274,13 +220,33 @@ static int __devinit of_flash_probe(struct of_device *dev,
 	}
 	info->mtd->owner = THIS_MODULE;
 
-	err = parse_partitions(info, dev);
+#ifdef CONFIG_MTD_PARTITIONS
+	/* First look for RedBoot table or partitions on the command
+	 * line, these take precedence over device tree information */
+	err = parse_mtd_partitions(info->mtd, part_probe_types,
+	                           &info->parts, 0);
 	if (err < 0)
-		goto err_out;
+		return err;
+
+#ifdef CONFIG_MTD_OF_PARTS
+	if (err == 0) {
+		err = of_mtd_parse_partitions(&dev->dev, info->mtd,
+		                              dp, &info->parts);
+		if (err < 0)
+			return err;
+	}
+#endif
+
+	if (err == 0) {
+		err = parse_obsolete_partitions(dev, info, dp);
+		if (err < 0)
+			return err;
+	}
 
 	if (err > 0)
-		add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err);
+		add_mtd_partitions(info->mtd, info->parts, err);
 	else
+#endif
 		add_mtd_device(info->mtd);
 
 	return 0;
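The reworked of_flash_probe() above tries partition sources in priority order: command line/RedBoot first, then device-tree partition subnodes, then the obsolete "partitions" property, and only registers the whole device if none of them yields partitions. A toy fallback chain with hypothetical stub parsers:

#include <stdio.h>

/* Hypothetical stub parsers; each returns the number of partitions found
 * (0 = nothing, <0 = error), mirroring the probe order above. */
static int parse_cmdline_or_redboot(void) { return 0; }
static int parse_of_subnodes(void)        { return 3; }
static int parse_obsolete_property(void)  { return 1; }

int main(void)
{
	int nr;

	nr = parse_cmdline_or_redboot();        /* highest priority          */
	if (nr == 0)
		nr = parse_of_subnodes();       /* device-tree partitions    */
	if (nr == 0)
		nr = parse_obsolete_property(); /* legacy "partitions" prop  */

	if (nr > 0)
		printf("add_mtd_partitions: %d partitions\n", nr);
	else if (nr == 0)
		printf("add_mtd_device: whole device\n");
	else
		printf("error %d\n", nr);
	return 0;
}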
diff --git a/drivers/mtd/maps/pnc2000.c b/drivers/mtd/maps/pnc2000.c
deleted file mode 100644
index d7e16c2d5c44..000000000000
--- a/drivers/mtd/maps/pnc2000.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- *	pnc2000.c - mapper for Photron PNC-2000 board.
- *
- * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
- *
- * This code is GPL
- *
- * $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-#define WINDOW_ADDR 0xbf000000
-#define WINDOW_SIZE 0x00400000
-
-/*
- * MAP DRIVER STUFF
- */
-
-
-static struct map_info pnc_map = {
-	.name = "PNC-2000",
-	.size = WINDOW_SIZE,
-	.bankwidth = 4,
-	.phys = 0xFFFFFFFF,
-	.virt = (void __iomem *)WINDOW_ADDR,
-};
-
-
-/*
- * MTD 'PARTITIONING' STUFF
- */
-static struct mtd_partition pnc_partitions[3] = {
-	{
-		.name = "PNC-2000 boot firmware",
-		.size = 0x20000,
-		.offset = 0
-	},
-	{
-		.name = "PNC-2000 kernel",
-		.size = 0x1a0000,
-		.offset = 0x20000
-	},
-	{
-		.name = "PNC-2000 filesystem",
-		.size = 0x240000,
-		.offset = 0x1c0000
-	}
-};
-
-/*
- * This is the master MTD device for which all the others are just
- * auto-relocating aliases.
- */
-static struct mtd_info *mymtd;
-
-static int __init init_pnc2000(void)
-{
-	printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
-
-	simple_map_init(&pnc_map);
-
-	mymtd = do_map_probe("cfi_probe", &pnc_map);
-	if (mymtd) {
-		mymtd->owner = THIS_MODULE;
-		return add_mtd_partitions(mymtd, pnc_partitions, 3);
-	}
-
-	return -ENXIO;
-}
-
-static void __exit cleanup_pnc2000(void)
-{
-	if (mymtd) {
-		del_mtd_partitions(mymtd);
-		map_destroy(mymtd);
-	}
-}
-
-module_init(init_pnc2000);
-module_exit(cleanup_pnc2000);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>");
-MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index dcfb85840d1e..0fc5584324e3 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -79,7 +79,7 @@ scb2_fixup_mtd(struct mtd_info *mtd)
 	struct cfi_private *cfi = map->fldrv_priv;
 
 	/* barf if this doesn't look right */
-	if (cfi->cfiq->InterfaceDesc != 1) {
+	if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
 		printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
 		    cfi->cfiq->InterfaceDesc);
 		return -1;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 74d9d30edabd..839eed8430a2 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -248,9 +248,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 		return -EBUSY;
 	}
 
-	mutex_init(&new->lock);
 	list_add_tail(&new->list, &tr->devs);
  added:
+	mutex_init(&new->lock);
 	if (!tr->writesect)
 		new->readonly = 1;
 
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index a0cee86464ca..5d3ac512ce16 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -481,6 +481,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
 	{
 		struct mtd_oob_buf buf;
 		struct mtd_oob_ops ops;
+		uint32_t retlen;
 
 		if(!(file->f_mode & 2))
 			return -EPERM;
@@ -520,8 +521,11 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
 		buf.start &= ~(mtd->oobsize - 1);
 		ret = mtd->write_oob(mtd, buf.start, &ops);
 
-		if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen,
-				 sizeof(uint32_t)))
+		if (ops.oobretlen > 0xFFFFFFFFU)
+			ret = -EOVERFLOW;
+		retlen = ops.oobretlen;
+		if (copy_to_user(&((struct mtd_oob_buf *)argp)->length,
+				 &retlen, sizeof(buf.length)))
 			ret = -EFAULT;
 
 		kfree(ops.oobbuf);
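The ioctl change above copies the OOB result back through the actual struct member and reports -EOVERFLOW rather than silently truncating a size_t into the 32-bit user field. A small model of that clamp:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Copy a size_t result into a 32-bit user-visible field, as the ioctl
 * above now does: flag overflow instead of silently truncating. */
static int report_retlen(size_t oobretlen, uint32_t *user_len)
{
	if (oobretlen > 0xFFFFFFFFU)
		return -EOVERFLOW;
	*user_len = (uint32_t)oobretlen;
	return 0;
}

int main(void)
{
	uint32_t len = 0;

	printf("%d\n", report_retlen(4096, &len));      /* 0, len = 4096             */
	printf("%d\n", report_retlen(SIZE_MAX, &len));  /* negative errno on 64-bit  */
	return 0;
}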
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6c2645e28371..f7e7890e5bc6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -61,7 +61,7 @@ int add_mtd_device(struct mtd_info *mtd)
 
 			/* Some chips always power up locked. Unlock them now */
 			if ((mtd->flags & MTD_WRITEABLE)
-			    && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) {
+			    && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
 				if (mtd->unlock(mtd, 0, mtd->size))
 					printk(KERN_WARNING
 					       "%s: unlock failed, "
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f8af627f0b98..d3cf05012b46 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -28,19 +28,26 @@
 #include <linux/workqueue.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
 #include <linux/mtd/mtd.h>
 
 #define OOPS_PAGE_SIZE 4096
 
-static struct mtdoops_context {
+struct mtdoops_context {
 	int mtd_index;
-	struct work_struct work;
+	struct work_struct work_erase;
+	struct work_struct work_write;
 	struct mtd_info *mtd;
 	int oops_pages;
 	int nextpage;
 	int nextcount;
 
 	void *oops_buf;
+
+	/* writecount and disabling ready are spin lock protected */
+	spinlock_t writecount_lock;
 	int ready;
 	int writecount;
 } oops_cxt;
@@ -62,10 +69,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
 	erase.mtd = mtd;
 	erase.callback = mtdoops_erase_callback;
 	erase.addr = offset;
-	if (mtd->erasesize < OOPS_PAGE_SIZE)
-		erase.len = OOPS_PAGE_SIZE;
-	else
-		erase.len = mtd->erasesize;
+	erase.len = mtd->erasesize;
 	erase.priv = (u_long)&wait_q;
 
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -87,7 +91,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
 	return 0;
 }
 
-static int mtdoops_inc_counter(struct mtdoops_context *cxt)
+static void mtdoops_inc_counter(struct mtdoops_context *cxt)
 {
 	struct mtd_info *mtd = cxt->mtd;
 	size_t retlen;
@@ -103,25 +107,30 @@ static int mtdoops_inc_counter(struct mtdoops_context *cxt)
 
 	ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
 			&retlen, (u_char *) &count);
-	if ((retlen != 4) || (ret < 0)) {
+	if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
 		printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
 				", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
 				retlen, ret);
-		return 1;
+		schedule_work(&cxt->work_erase);
+		return;
 	}
 
 	/* See if we need to erase the next block */
-	if (count != 0xffffffff)
-		return 1;
+	if (count != 0xffffffff) {
+		schedule_work(&cxt->work_erase);
+		return;
+	}
 
 	printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
 			cxt->nextpage, cxt->nextcount);
 	cxt->ready = 1;
-	return 0;
 }
 
-static void mtdoops_prepare(struct mtdoops_context *cxt)
+/* Scheduled work - when we can't proceed without erasing a block */
+static void mtdoops_workfunc_erase(struct work_struct *work)
 {
+	struct mtdoops_context *cxt =
+			container_of(work, struct mtdoops_context, work_erase);
 	struct mtd_info *mtd = cxt->mtd;
 	int i = 0, j, ret, mod;
 
@@ -136,8 +145,14 @@ static void mtdoops_prepare(struct mtdoops_context *cxt)
 			cxt->nextpage = 0;
 	}
 
-	while (mtd->block_isbad &&
-			mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) {
+	while (mtd->block_isbad) {
+		ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		if (!ret)
+			break;
+		if (ret < 0) {
+			printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+			return;
+		}
 badblock:
 		printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
 				cxt->nextpage * OOPS_PAGE_SIZE);
@@ -154,34 +169,72 @@ badblock:
 	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
 		ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
 
-	if (ret < 0) {
-		if (mtd->block_markbad)
-			mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
-		goto badblock;
+	if (ret >= 0) {
+		printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+		cxt->ready = 1;
+		return;
 	}
 
-	printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+	if (mtd->block_markbad && (ret == -EIO)) {
+		ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		if (ret < 0) {
+			printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+			return;
+		}
+	}
+	goto badblock;
+}
 
-	cxt->ready = 1;
+static void mtdoops_write(struct mtdoops_context *cxt, int panic)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	size_t retlen;
+	int ret;
+
+	if (cxt->writecount < OOPS_PAGE_SIZE)
+		memset(cxt->oops_buf + cxt->writecount, 0xff,
+					OOPS_PAGE_SIZE - cxt->writecount);
+
+	if (panic)
+		ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+	else
+		ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+
+	cxt->writecount = 0;
+
+	if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
+		printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
+			cxt->nextpage * OOPS_PAGE_SIZE, retlen,	OOPS_PAGE_SIZE, ret);
+
+	mtdoops_inc_counter(cxt);
 }
 
-static void mtdoops_workfunc(struct work_struct *work)
+
+static void mtdoops_workfunc_write(struct work_struct *work)
 {
 	struct mtdoops_context *cxt =
-			container_of(work, struct mtdoops_context, work);
+			container_of(work, struct mtdoops_context, work_write);
 
-	mtdoops_prepare(cxt);
-}
+	mtdoops_write(cxt, 0);
+}
 
-static int find_next_position(struct mtdoops_context *cxt)
+static void find_next_position(struct mtdoops_context *cxt)
 {
 	struct mtd_info *mtd = cxt->mtd;
-	int page, maxpos = 0;
+	int ret, page, maxpos = 0;
 	u32 count, maxcount = 0xffffffff;
 	size_t retlen;
 
 	for (page = 0; page < cxt->oops_pages; page++) {
-		mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+		if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
+			printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
+				", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+			continue;
+		}
+
 		if (count == 0xffffffff)
 			continue;
 		if (maxcount == 0xffffffff) {
@@ -205,20 +258,19 @@ static int find_next_position(struct mtdoops_context *cxt)
 		cxt->ready = 1;
 		printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
 				cxt->nextpage, cxt->nextcount);
-		return 0;
+		return;
 	}
 
 	cxt->nextpage = maxpos;
 	cxt->nextcount = maxcount;
 
-	return mtdoops_inc_counter(cxt);
+	mtdoops_inc_counter(cxt);
 }
 
 
 static void mtdoops_notify_add(struct mtd_info *mtd)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
-	int ret;
 
 	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
 		return;
@@ -229,14 +281,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 		return;
 	}
 
+	if (mtd->erasesize < OOPS_PAGE_SIZE) {
+		printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
+				mtd->index);
+		return;
+	}
+
 	cxt->mtd = mtd;
 	cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
 
-	ret = find_next_position(cxt);
-	if (ret == 1)
-		mtdoops_prepare(cxt);
+	find_next_position(cxt);
 
-	printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
+	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
 }
 
 static void mtdoops_notify_remove(struct mtd_info *mtd)
@@ -254,31 +310,28 @@ static void mtdoops_console_sync(void)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
 	struct mtd_info *mtd = cxt->mtd;
-	size_t retlen;
-	int ret;
+	unsigned long flags;
 
-	if (!cxt->ready || !mtd)
+	if (!cxt->ready || !mtd || cxt->writecount == 0)
 		return;
 
-	if (cxt->writecount == 0)
+	/*
+	 * Once ready is 0 and we've held the lock, no further writes to the
+	 * buffer will happen
+	 */
+	spin_lock_irqsave(&cxt->writecount_lock, flags);
+	if (!cxt->ready) {
+		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
 		return;
-
-	if (cxt->writecount < OOPS_PAGE_SIZE)
-		memset(cxt->oops_buf + cxt->writecount, 0xff,
-					OOPS_PAGE_SIZE - cxt->writecount);
-
-	ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
-					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+	}
 	cxt->ready = 0;
-	cxt->writecount = 0;
-
-	if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
-		printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
-			cxt->nextpage * OOPS_PAGE_SIZE, retlen,	OOPS_PAGE_SIZE, ret);
+	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
 
-	ret = mtdoops_inc_counter(cxt);
-	if (ret == 1)
-		schedule_work(&cxt->work);
+	if (mtd->panic_write && in_interrupt())
+		/* Interrupt context, we're going to panic so try and log */
+		mtdoops_write(cxt, 1);
+	else
+		schedule_work(&cxt->work_write);
 }
 
 static void
@@ -286,7 +339,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 {
 	struct mtdoops_context *cxt = co->data;
 	struct mtd_info *mtd = cxt->mtd;
-	int i;
+	unsigned long flags;
 
 	if (!oops_in_progress) {
 		mtdoops_console_sync();
@@ -296,6 +349,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 	if (!cxt->ready || !mtd)
 		return;
 
+	/* Locking on writecount ensures sequential writes to the buffer */
+	spin_lock_irqsave(&cxt->writecount_lock, flags);
+
+	/* Check ready status didn't change whilst waiting for the lock */
+	if (!cxt->ready) {
+		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+		return;
+	}
+
 	if (cxt->writecount == 0) {
 		u32 *stamp = cxt->oops_buf;
 		*stamp = cxt->nextcount;
@@ -305,10 +365,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 	if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
 		count = OOPS_PAGE_SIZE - cxt->writecount;
 
-	for (i = 0; i < count; i++, s++)
-		*((char *)(cxt->oops_buf) + cxt->writecount + i) = *s;
+	memcpy(cxt->oops_buf + cxt->writecount, s, count);
+	cxt->writecount += count;
 
-	cxt->writecount = cxt->writecount + count;
+	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+
+	if (cxt->writecount == OOPS_PAGE_SIZE)
+		mtdoops_console_sync();
 }
 
 static int __init mtdoops_console_setup(struct console *co, char *options)
@@ -334,7 +397,6 @@ static struct console mtdoops_console = {
 	.write		= mtdoops_console_write,
 	.setup		= mtdoops_console_setup,
 	.unblank	= mtdoops_console_sync,
-	.flags		= CON_PRINTBUFFER,
 	.index		= -1,
 	.data		= &oops_cxt,
 };
@@ -347,11 +409,12 @@ static int __init mtdoops_console_init(void)
 	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
 
 	if (!cxt->oops_buf) {
-		printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
+		printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
 		return -ENOMEM;
 	}
 
-	INIT_WORK(&cxt->work, mtdoops_workfunc);
+	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
+	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
 
 	register_console(&mtdoops_console);
 	register_mtd_user(&mtdoops_notifier);
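The mtdoops rework above splits the flush into two paths: in interrupt/panic context it must not sleep, so it calls panic_write() directly; otherwise it defers the write to a workqueue. A toy model of that decision (the flash ops here are stand-ins, and schedule_work() is modelled as a direct call to the normal write):

#include <stdio.h>

struct flash_ops {
	int (*write)(const char *buf);        /* normal path, may sleep     */
	int (*panic_write)(const char *buf);  /* polled path, never sleeps  */
};

static int normal_write(const char *buf) { printf("write: %s\n", buf); return 0; }
static int polled_write(const char *buf) { printf("panic_write: %s\n", buf); return 0; }

/* Toy model of mtdoops_console_sync(): in interrupt/panic context write
 * immediately via panic_write(); otherwise defer to process context. */
static void flush_oops(const struct flash_ops *ops, const char *buf, int in_irq)
{
	if (ops->panic_write && in_irq)
		ops->panic_write(buf);
	else
		ops->write(buf);
}

int main(void)
{
	struct flash_ops ops = { normal_write, polled_write };

	flush_oops(&ops, "oops record 1", 0);  /* normal path */
	flush_oops(&ops, "oops record 2", 1);  /* panic path  */
	return 0;
}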
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 6174a97d7902..c66902df3171 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -151,6 +151,20 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
 				    len, retlen, buf);
 }
 
+static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
+			size_t *retlen, const u_char *buf)
+{
+	struct mtd_part *part = PART(mtd);
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	if (to >= mtd->size)
+		len = 0;
+	else if (to + len > mtd->size)
+		len = mtd->size - to;
+	return part->master->panic_write (part->master, to + part->offset,
+				    len, retlen, buf);
+}
+
 static int part_write_oob(struct mtd_info *mtd, loff_t to,
 			 struct mtd_oob_ops *ops)
 {
@@ -352,6 +366,9 @@ int add_mtd_partitions(struct mtd_info *master,
 		slave->mtd.read = part_read;
 		slave->mtd.write = part_write;
 
+		if (master->panic_write)
+			slave->mtd.panic_write = part_panic_write;
+
 		if(master->point && master->unpoint){
 			slave->mtd.point = part_point;
 			slave->mtd.unpoint = part_unpoint;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 246d4512f64b..4a3c6759492b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -93,7 +93,7 @@ config MTD_NAND_AU1550
 
 config MTD_NAND_BF5XX
 	tristate "Blackfin on-chip NAND Flash Controller driver"
-	depends on BF54x && MTD_NAND
+	depends on (BF54x || BF52x) && MTD_NAND
 	help
 	  This enables the Blackfin on-chip NAND flash controller
 
@@ -283,6 +283,12 @@ config MTD_NAND_CM_X270
 	tristate "Support for NAND Flash on CM-X270 modules"
 	depends on MTD_NAND && MACH_ARMCORE
 
+config MTD_NAND_PASEMI
+	tristate "NAND support for PA Semi PWRficient"
+	depends on MTD_NAND && PPC_PASEMI
+	help
+	  Enables support for the NAND Flash interface on PA Semi
+	  PWRficient based boards.
 
 config MTD_NAND_NANDSIM
 	tristate "Support for NAND Flash Simulator"
@@ -306,4 +312,22 @@ config MTD_ALAUDA
 	  These two (and possibly other) Alauda-based cardreaders for
 	  SmartMedia and xD allow raw flash access.
 
+config MTD_NAND_ORION
+	tristate "NAND Flash support for Marvell Orion SoC"
+	depends on ARCH_ORION && MTD_NAND
+	help
+	  This enables the NAND flash controller on Orion machines.
+
+	  No board-specific support is done by this driver; each board
+	  must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_FSL_ELBC
+	tristate "NAND support for Freescale eLBC controllers"
+	depends on MTD_NAND && PPC_OF
+	help
+	  Various Freescale chips, including the 8313, contain a NAND Flash
+	  Controller Module with built-in hardware ECC capabilities.
+	  Enabling this option lets you use that controller to drive
+	  external NAND devices.
+
 endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 3ad6c0165da3..80d575eeee96 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -29,5 +29,8 @@ obj-$(CONFIG_MTD_NAND_CM_X270)		+= cmx270_nand.o
 obj-$(CONFIG_MTD_NAND_BASLER_EXCITE)	+= excite_nandflash.o
 obj-$(CONFIG_MTD_NAND_PLATFORM)		+= plat_nand.o
 obj-$(CONFIG_MTD_ALAUDA)		+= alauda.o
+obj-$(CONFIG_MTD_NAND_PASEMI)		+= pasemi_nand.o
+obj-$(CONFIG_MTD_NAND_ORION)		+= orion_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_ELBC)		+= fsl_elbc_nand.o
 
 nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index b2a5672df6e0..c9fb2acf4056 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -156,14 +156,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
 	}
 
 #ifdef CONFIG_MTD_PARTITIONS
-	if (host->board->partition_info)
-		partitions = host->board->partition_info(mtd->size, &num_partitions);
 #ifdef CONFIG_MTD_CMDLINE_PARTS
-	else {
-		mtd->name = "at91_nand";
-		num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
-	}
+	mtd->name = "at91_nand";
+	num_partitions = parse_mtd_partitions(mtd, part_probes,
+					      &partitions, 0);
 #endif
+	if (num_partitions <= 0 && host->board->partition_info)
+		partitions = host->board->partition_info(mtd->size,
+							 &num_partitions);
 
 	if ((!partitions) || (num_partitions == 0)) {
 		printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a52f3a737c39..747042ab094a 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -74,7 +74,22 @@ static int hardware_ecc = 1;
 static int hardware_ecc;
 #endif
 
-static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0};
+static unsigned short bfin_nfc_pin_req[] =
+	{P_NAND_CE,
+	 P_NAND_RB,
+	 P_NAND_D0,
+	 P_NAND_D1,
+	 P_NAND_D2,
+	 P_NAND_D3,
+	 P_NAND_D4,
+	 P_NAND_D5,
+	 P_NAND_D6,
+	 P_NAND_D7,
+	 P_NAND_WE,
+	 P_NAND_RE,
+	 P_NAND_CLE,
+	 P_NAND_ALE,
+	 0};
 
 /*
  * Data structures for bf5xx nand flash controller driver
@@ -278,7 +293,6 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
 	u16 ecc0, ecc1;
 	u32 code[2];
 	u8 *p;
-	int bytes = 3, i;
 
 	/* first 4 bytes ECC code for 256 page size */
 	ecc0 = bfin_read_NFC_ECC0();
@@ -288,19 +302,24 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
 
 	dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
 
+	/* first 3 bytes in ecc_code for 256 page size */
+	p = (u8 *) code;
+	memcpy(ecc_code, p, 3);
+
 	/* second 4 bytes ECC code for 512 page size */
 	if (page_size == 512) {
 		ecc0 = bfin_read_NFC_ECC2();
 		ecc1 = bfin_read_NFC_ECC3();
 		code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11);
-		bytes = 6;
+
+		/* second 3 bytes in ecc_code for second 256
+		 * bytes of 512 page size
+		 */
+		p = (u8 *) (code + 1);
+		memcpy((ecc_code + 3), p, 3);
 		dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
 	}
 
-	p = (u8 *)code;
-	for (i = 0; i < bytes; i++)
-		ecc_code[i] = p[i];
-
 	return 0;
 }
 
@@ -507,12 +526,13 @@ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
 
 	init_completion(&info->dma_completion);
 
+#ifdef CONFIG_BF54x
 	/* Setup DMAC1 channel mux for NFC which shared with SDH */
 	val = bfin_read_DMAC1_PERIMUX();
 	val &= 0xFFFE;
 	bfin_write_DMAC1_PERIMUX(val);
 	SSYNC();
-
+#endif
 	/* Request NFC DMA channel */
 	ret = request_dma(CH_NFC, "BF5XX NFC driver");
 	if (ret < 0) {
@@ -744,9 +764,6 @@ static int bf5xx_nand_resume(struct platform_device *dev)
 {
 	struct bf5xx_nand_info *info = platform_get_drvdata(dev);
 
-	if (info)
-		bf5xx_nand_hw_init(info);
-
 	return 0;
 }
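
The bf5xx_nand ECC change above keeps only the low three bytes of each 32-bit code word, where the word packs one hardware parity value in its low bits and the other shifted up by 11. A userspace sketch of that packing on a little-endian machine; the register values are invented:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint16_t ecc0 = 0x155;	/* pretend NFC_ECC0 value */
		uint16_t ecc1 = 0x2AA;	/* pretend NFC_ECC1 value */
		uint32_t code = (ecc0 & 0x3FF) | ((uint32_t)(ecc1 & 0x3FF) << 11);
		uint8_t ecc_code[3];

		/* like the driver: keep the low 3 bytes of the code word */
		memcpy(ecc_code, &code, 3);
		printf("code 0x%08x -> ecc %02x %02x %02x\n",
		       code, ecc_code[0], ecc_code[1], ecc_code[2]);
		return 0;
	}
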
 
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 1e811715211a..da6ceaa80ba1 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -11,6 +11,7 @@
 #undef DEBUG
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
 #include <linux/rslib.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
@@ -52,6 +53,7 @@
 
 struct cafe_priv {
 	struct nand_chip nand;
+	struct mtd_partition *parts;
 	struct pci_dev *pdev;
 	void __iomem *mmio;
 	struct rs_control *rs;
@@ -84,6 +86,10 @@ static unsigned int numtimings;
 static int timing[3];
 module_param_array(timing, int, &numtimings, 0644);
 
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "RedBoot", NULL };
+#endif
+
 /* Hrm. Why isn't this already conditional on something in the struct device? */
 #define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
 
@@ -620,7 +626,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 {
 	struct mtd_info *mtd;
 	struct cafe_priv *cafe;
+	struct mtd_partition *parts;
 	uint32_t ctrl;
+	int nr_parts;
 	int err = 0;
 
 	/* Very old versions shared the same PCI ident for all three
@@ -787,7 +795,18 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 		goto out_irq;
 
 	pci_set_drvdata(pdev, mtd);
+
+	/* We register the whole device first, separate from the partitions */
 	add_mtd_device(mtd);
+
+#ifdef CONFIG_MTD_PARTITIONS
+	nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
+	if (nr_parts > 0) {
+		cafe->parts = parts;
+		dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts);
+		add_mtd_partitions(mtd, parts, nr_parts);
+	}
+#endif
 	goto out;
 
  out_irq:
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
new file mode 100644
index 000000000000..b025dfe0b274
--- /dev/null
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -0,0 +1,1244 @@
+/* Freescale Enhanced Local Bus Controller NAND driver
+ *
+ * Copyright (c) 2006-2007 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ *          Scott Wood <scottwood@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
+
+struct elbc_bank {
+	__be32 br;             /**< Base Register  */
+#define BR_BA           0xFFFF8000
+#define BR_BA_SHIFT             15
+#define BR_PS           0x00001800
+#define BR_PS_SHIFT             11
+#define BR_PS_8         0x00000800  /* Port Size 8 bit */
+#define BR_PS_16        0x00001000  /* Port Size 16 bit */
+#define BR_PS_32        0x00001800  /* Port Size 32 bit */
+#define BR_DECC         0x00000600
+#define BR_DECC_SHIFT            9
+#define BR_DECC_OFF     0x00000000  /* HW ECC checking and generation off */
+#define BR_DECC_CHK     0x00000200  /* HW ECC checking on, generation off */
+#define BR_DECC_CHK_GEN 0x00000400  /* HW ECC checking and generation on */
+#define BR_WP           0x00000100
+#define BR_WP_SHIFT              8
+#define BR_MSEL         0x000000E0
+#define BR_MSEL_SHIFT            5
+#define BR_MS_GPCM      0x00000000  /* GPCM */
+#define BR_MS_FCM       0x00000020  /* FCM */
+#define BR_MS_SDRAM     0x00000060  /* SDRAM */
+#define BR_MS_UPMA      0x00000080  /* UPMA */
+#define BR_MS_UPMB      0x000000A0  /* UPMB */
+#define BR_MS_UPMC      0x000000C0  /* UPMC */
+#define BR_V            0x00000001
+#define BR_V_SHIFT               0
+#define BR_RES          ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V)
+
+	__be32 or;             /**< Options Register  */
+#define OR0 0x5004
+#define OR1 0x500C
+#define OR2 0x5014
+#define OR3 0x501C
+#define OR4 0x5024
+#define OR5 0x502C
+#define OR6 0x5034
+#define OR7 0x503C
+
+#define OR_FCM_AM               0xFFFF8000
+#define OR_FCM_AM_SHIFT                 15
+#define OR_FCM_BCTLD            0x00001000
+#define OR_FCM_BCTLD_SHIFT              12
+#define OR_FCM_PGS              0x00000400
+#define OR_FCM_PGS_SHIFT                10
+#define OR_FCM_CSCT             0x00000200
+#define OR_FCM_CSCT_SHIFT                9
+#define OR_FCM_CST              0x00000100
+#define OR_FCM_CST_SHIFT                 8
+#define OR_FCM_CHT              0x00000080
+#define OR_FCM_CHT_SHIFT                 7
+#define OR_FCM_SCY              0x00000070
+#define OR_FCM_SCY_SHIFT                 4
+#define OR_FCM_SCY_1            0x00000010
+#define OR_FCM_SCY_2            0x00000020
+#define OR_FCM_SCY_3            0x00000030
+#define OR_FCM_SCY_4            0x00000040
+#define OR_FCM_SCY_5            0x00000050
+#define OR_FCM_SCY_6            0x00000060
+#define OR_FCM_SCY_7            0x00000070
+#define OR_FCM_RST              0x00000008
+#define OR_FCM_RST_SHIFT                 3
+#define OR_FCM_TRLX             0x00000004
+#define OR_FCM_TRLX_SHIFT                2
+#define OR_FCM_EHTR             0x00000002
+#define OR_FCM_EHTR_SHIFT                1
+};
+
+struct elbc_regs {
+	struct elbc_bank bank[8];
+	u8 res0[0x28];
+	__be32 mar;             /**< UPM Address Register */
+	u8 res1[0x4];
+	__be32 mamr;            /**< UPMA Mode Register */
+	__be32 mbmr;            /**< UPMB Mode Register */
+	__be32 mcmr;            /**< UPMC Mode Register */
+	u8 res2[0x8];
+	__be32 mrtpr;           /**< Memory Refresh Timer Prescaler Register */
+	__be32 mdr;             /**< UPM Data Register */
+	u8 res3[0x4];
+	__be32 lsor;            /**< Special Operation Initiation Register */
+	__be32 lsdmr;           /**< SDRAM Mode Register */
+	u8 res4[0x8];
+	__be32 lurt;            /**< UPM Refresh Timer */
+	__be32 lsrt;            /**< SDRAM Refresh Timer */
+	u8 res5[0x8];
+	__be32 ltesr;           /**< Transfer Error Status Register */
+#define LTESR_BM   0x80000000
+#define LTESR_FCT  0x40000000
+#define LTESR_PAR  0x20000000
+#define LTESR_WP   0x04000000
+#define LTESR_ATMW 0x00800000
+#define LTESR_ATMR 0x00400000
+#define LTESR_CS   0x00080000
+#define LTESR_CC   0x00000001
+#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+	__be32 ltedr;           /**< Transfer Error Disable Register */
+	__be32 lteir;           /**< Transfer Error Interrupt Register */
+	__be32 lteatr;          /**< Transfer Error Attributes Register */
+	__be32 ltear;           /**< Transfer Error Address Register */
+	u8 res6[0xC];
+	__be32 lbcr;            /**< Configuration Register */
+#define LBCR_LDIS  0x80000000
+#define LBCR_LDIS_SHIFT    31
+#define LBCR_BCTLC 0x00C00000
+#define LBCR_BCTLC_SHIFT   22
+#define LBCR_AHD   0x00200000
+#define LBCR_LPBSE 0x00020000
+#define LBCR_LPBSE_SHIFT   17
+#define LBCR_EPAR  0x00010000
+#define LBCR_EPAR_SHIFT    16
+#define LBCR_BMT   0x0000FF00
+#define LBCR_BMT_SHIFT      8
+#define LBCR_INIT  0x00040000
+	__be32 lcrr;            /**< Clock Ratio Register */
+#define LCRR_DBYP    0x80000000
+#define LCRR_DBYP_SHIFT      31
+#define LCRR_BUFCMDC 0x30000000
+#define LCRR_BUFCMDC_SHIFT   28
+#define LCRR_ECL     0x03000000
+#define LCRR_ECL_SHIFT       24
+#define LCRR_EADC    0x00030000
+#define LCRR_EADC_SHIFT      16
+#define LCRR_CLKDIV  0x0000000F
+#define LCRR_CLKDIV_SHIFT     0
+	u8 res7[0x8];
+	__be32 fmr;             /**< Flash Mode Register */
+#define FMR_CWTO     0x0000F000
+#define FMR_CWTO_SHIFT       12
+#define FMR_BOOT     0x00000800
+#define FMR_ECCM     0x00000100
+#define FMR_AL       0x00000030
+#define FMR_AL_SHIFT          4
+#define FMR_OP       0x00000003
+#define FMR_OP_SHIFT          0
+	__be32 fir;             /**< Flash Instruction Register */
+#define FIR_OP0      0xF0000000
+#define FIR_OP0_SHIFT        28
+#define FIR_OP1      0x0F000000
+#define FIR_OP1_SHIFT        24
+#define FIR_OP2      0x00F00000
+#define FIR_OP2_SHIFT        20
+#define FIR_OP3      0x000F0000
+#define FIR_OP3_SHIFT        16
+#define FIR_OP4      0x0000F000
+#define FIR_OP4_SHIFT        12
+#define FIR_OP5      0x00000F00
+#define FIR_OP5_SHIFT         8
+#define FIR_OP6      0x000000F0
+#define FIR_OP6_SHIFT         4
+#define FIR_OP7      0x0000000F
+#define FIR_OP7_SHIFT         0
+#define FIR_OP_NOP   0x0        /* No operation and end of sequence */
+#define FIR_OP_CA    0x1        /* Issue current column address */
+#define FIR_OP_PA    0x2        /* Issue current block+page address */
+#define FIR_OP_UA    0x3        /* Issue user defined address */
+#define FIR_OP_CM0   0x4        /* Issue command from FCR[CMD0] */
+#define FIR_OP_CM1   0x5        /* Issue command from FCR[CMD1] */
+#define FIR_OP_CM2   0x6        /* Issue command from FCR[CMD2] */
+#define FIR_OP_CM3   0x7        /* Issue command from FCR[CMD3] */
+#define FIR_OP_WB    0x8        /* Write FBCR bytes from FCM buffer */
+#define FIR_OP_WS    0x9        /* Write 1 or 2 bytes from MDR[AS] */
+#define FIR_OP_RB    0xA        /* Read FBCR bytes to FCM buffer */
+#define FIR_OP_RS    0xB        /* Read 1 or 2 bytes to MDR[AS] */
+#define FIR_OP_CW0   0xC        /* Wait then issue FCR[CMD0] */
+#define FIR_OP_CW1   0xD        /* Wait then issue FCR[CMD1] */
+#define FIR_OP_RBW   0xE        /* Wait then read FBCR bytes */
+#define FIR_OP_RSW   0xE        /* Wait then read 1 or 2 bytes */
+	__be32 fcr;             /**< Flash Command Register */
+#define FCR_CMD0     0xFF000000
+#define FCR_CMD0_SHIFT       24
+#define FCR_CMD1     0x00FF0000
+#define FCR_CMD1_SHIFT       16
+#define FCR_CMD2     0x0000FF00
+#define FCR_CMD2_SHIFT        8
+#define FCR_CMD3     0x000000FF
+#define FCR_CMD3_SHIFT        0
+	__be32 fbar;            /**< Flash Block Address Register */
+#define FBAR_BLK     0x00FFFFFF
+	__be32 fpar;            /**< Flash Page Address Register */
+#define FPAR_SP_PI   0x00007C00
+#define FPAR_SP_PI_SHIFT     10
+#define FPAR_SP_MS   0x00000200
+#define FPAR_SP_CI   0x000001FF
+#define FPAR_SP_CI_SHIFT      0
+#define FPAR_LP_PI   0x0003F000
+#define FPAR_LP_PI_SHIFT     12
+#define FPAR_LP_MS   0x00000800
+#define FPAR_LP_CI   0x000007FF
+#define FPAR_LP_CI_SHIFT      0
+	__be32 fbcr;            /**< Flash Byte Count Register */
+#define FBCR_BC      0x00000FFF
+	u8 res11[0x8];
+	u8 res8[0xF00];
+};
+
+struct fsl_elbc_ctrl;
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+	struct mtd_info mtd;
+	struct nand_chip chip;
+	struct fsl_elbc_ctrl *ctrl;
+
+	struct device *dev;
+	int bank;               /* Chip select bank number           */
+	u8 __iomem *vbase;      /* Chip select base virtual address  */
+	int page_size;          /* NAND page size (0=512, 1=2048)    */
+	unsigned int fmr;       /* FCM Flash Mode Register value     */
+};
+
+/* overview of the fsl elbc controller */
+
+struct fsl_elbc_ctrl {
+	struct nand_hw_control controller;
+	struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+	/* device info */
+	struct device *dev;
+	struct elbc_regs __iomem *regs;
+	int irq;
+	wait_queue_head_t irq_wait;
+	unsigned int irq_status; /* status read from LTESR by irq handler */
+	u8 __iomem *addr;        /* Address of assigned FCM buffer        */
+	unsigned int page;       /* Last page written to / read from      */
+	unsigned int read_bytes; /* Number of bytes read during command   */
+	unsigned int column;     /* Saved column from SEQIN               */
+	unsigned int index;      /* Pointer to next byte to 'read'        */
+	unsigned int status;     /* status read from LTESR after last op  */
+	unsigned int mdr;        /* UPM/FCM Data Register value           */
+	unsigned int use_mdr;    /* Non zero if the MDR is to be set      */
+	unsigned int oob;        /* Non zero if operating on OOB data     */
+	char *oob_poi;           /* Place to write ECC after read back    */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+/* Small Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
+	.eccbytes = 3,
+	.eccpos = {6, 7, 8},
+	.oobfree = { {0, 5}, {9, 7} },
+	.oobavail = 12,
+};
+
+/* Small Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
+	.eccbytes = 3,
+	.eccpos = {8, 9, 10},
+	.oobfree = { {0, 5}, {6, 2}, {11, 5} },
+	.oobavail = 12,
+};
+
+/* Large Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
+	.eccbytes = 12,
+	.eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
+	.oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
+	.oobavail = 48,
+};
+
+/* Large Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
+	.eccbytes = 12,
+	.eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
+	.oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
+	.oobavail = 48,
+};
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+	int buf_num;
+
+	ctrl->page = page_addr;
+
+	out_be32(&lbc->fbar,
+	         page_addr >> (chip->phys_erase_shift - chip->page_shift));
+
+	if (priv->page_size) {
+		out_be32(&lbc->fpar,
+		         ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+		         (oob ? FPAR_LP_MS : 0) | column);
+		buf_num = (page_addr & 1) << 2;
+	} else {
+		out_be32(&lbc->fpar,
+		         ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+		         (oob ? FPAR_SP_MS : 0) | column);
+		buf_num = page_addr & 7;
+	}
+
+	ctrl->addr = priv->vbase + buf_num * 1024;
+	ctrl->index = column;
+
+	/* for OOB data point to the second half of the buffer */
+	if (oob)
+		ctrl->index += priv->page_size ? 2048 : 512;
+
+	dev_vdbg(ctrl->dev, "set_addr: bank=%d, ctrl->addr=0x%p (0x%p), "
+	                    "index %x, pes %d ps %d\n",
+	         buf_num, ctrl->addr, priv->vbase, ctrl->index,
+	         chip->phys_erase_shift, chip->page_shift);
+}
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+
+	/* Setup the FMR[OP] to execute without write protection */
+	out_be32(&lbc->fmr, priv->fmr | 3);
+	if (ctrl->use_mdr)
+		out_be32(&lbc->mdr, ctrl->mdr);
+
+	dev_vdbg(ctrl->dev,
+	         "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+	         in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+	dev_vdbg(ctrl->dev,
+	         "fsl_elbc_run_command: fbar=%08x fpar=%08x "
+	         "fbcr=%08x bank=%d\n",
+	         in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+	         in_be32(&lbc->fbcr), priv->bank);
+
+	/* execute special operation */
+	out_be32(&lbc->lsor, priv->bank);
+
+	/* wait for FCM complete flag or timeout */
+	ctrl->irq_status = 0;
+	wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
+	                   FCM_TIMEOUT_MSECS * HZ/1000);
+	ctrl->status = ctrl->irq_status;
+
+	/* store mdr value in case it was needed */
+	if (ctrl->use_mdr)
+		ctrl->mdr = in_be32(&lbc->mdr);
+
+	ctrl->use_mdr = 0;
+
+	dev_vdbg(ctrl->dev,
+	         "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n",
+	         ctrl->status, ctrl->mdr, in_be32(&lbc->fmr));
+
+	/* returns 0 on success, otherwise non-zero */
+	return ctrl->status == LTESR_CC ? 0 : -EIO;
+}
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+
+	if (priv->page_size) {
+		out_be32(&lbc->fir,
+		         (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+		         (FIR_OP_CA  << FIR_OP1_SHIFT) |
+		         (FIR_OP_PA  << FIR_OP2_SHIFT) |
+		         (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+		         (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+		out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+		                    (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+	} else {
+		out_be32(&lbc->fir,
+		         (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+		         (FIR_OP_CA  << FIR_OP1_SHIFT) |
+		         (FIR_OP_PA  << FIR_OP2_SHIFT) |
+		         (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+		if (oob)
+			out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+		else
+			out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+	}
+}
+
+/* cmdfunc send commands to the FCM */
+static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+                             int column, int page_addr)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+
+	ctrl->use_mdr = 0;
+
+	/* clear the read buffer */
+	ctrl->read_bytes = 0;
+	if (command != NAND_CMD_PAGEPROG)
+		ctrl->index = 0;
+
+	switch (command) {
+	/* READ0 and READ1 read the entire buffer to use hardware ECC. */
+	case NAND_CMD_READ1:
+		column += 256;
+
+	/* fall-through */
+	case NAND_CMD_READ0:
+		dev_dbg(ctrl->dev,
+		        "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+		        " 0x%x, column: 0x%x.\n", page_addr, column);
+
+		out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+		set_addr(mtd, 0, page_addr, 0);
+
+		ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+		ctrl->index += column;
+
+		fsl_elbc_do_read(chip, 0);
+		fsl_elbc_run_command(mtd);
+		return;
+
+	/* READOOB reads only the OOB because no ECC is performed. */
+	case NAND_CMD_READOOB:
+		dev_vdbg(ctrl->dev,
+		         "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+			 " 0x%x, column: 0x%x.\n", page_addr, column);
+
+		out_be32(&lbc->fbcr, mtd->oobsize - column);
+		set_addr(mtd, column, page_addr, 1);
+
+		ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+		fsl_elbc_do_read(chip, 1);
+		fsl_elbc_run_command(mtd);
+		return;
+
+	/* READID must read all 5 possible bytes while CEB is active */
+	case NAND_CMD_READID:
+		dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+
+		out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+		                    (FIR_OP_UA  << FIR_OP1_SHIFT) |
+		                    (FIR_OP_RBW << FIR_OP2_SHIFT));
+		out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
+		/* 5 bytes for manuf, device and exts */
+		out_be32(&lbc->fbcr, 5);
+		ctrl->read_bytes = 5;
+		ctrl->use_mdr = 1;
+		ctrl->mdr = 0;
+
+		set_addr(mtd, 0, 0, 0);
+		fsl_elbc_run_command(mtd);
+		return;
+
+	/* ERASE1 stores the block and page address */
+	case NAND_CMD_ERASE1:
+		dev_vdbg(ctrl->dev,
+		         "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+		         "page_addr: 0x%x.\n", page_addr);
+		set_addr(mtd, 0, page_addr, 0);
+		return;
+
+	/* ERASE2 uses the block and page address from ERASE1 */
+	case NAND_CMD_ERASE2:
+		dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+		out_be32(&lbc->fir,
+		         (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+		         (FIR_OP_PA  << FIR_OP1_SHIFT) |
+		         (FIR_OP_CM1 << FIR_OP2_SHIFT));
+
+		out_be32(&lbc->fcr,
+		         (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+		         (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT));
+
+		out_be32(&lbc->fbcr, 0);
+		ctrl->read_bytes = 0;
+
+		fsl_elbc_run_command(mtd);
+		return;
+
+	/* SEQIN sets up the addr buffer and all registers except the length */
+	case NAND_CMD_SEQIN: {
+		__be32 fcr;
+		dev_vdbg(ctrl->dev,
+		         "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+		         "page_addr: 0x%x, column: 0x%x.\n",
+		         page_addr, column);
+
+		ctrl->column = column;
+		ctrl->oob = 0;
+
+		fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
+		      (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
+
+		if (priv->page_size) {
+			out_be32(&lbc->fir,
+			         (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+			         (FIR_OP_CA  << FIR_OP1_SHIFT) |
+			         (FIR_OP_PA  << FIR_OP2_SHIFT) |
+			         (FIR_OP_WB  << FIR_OP3_SHIFT) |
+			         (FIR_OP_CW1 << FIR_OP4_SHIFT));
+
+			fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+		} else {
+			out_be32(&lbc->fir,
+			         (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+			         (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+			         (FIR_OP_CA  << FIR_OP2_SHIFT) |
+			         (FIR_OP_PA  << FIR_OP3_SHIFT) |
+			         (FIR_OP_WB  << FIR_OP4_SHIFT) |
+			         (FIR_OP_CW1 << FIR_OP5_SHIFT));
+
+			if (column >= mtd->writesize) {
+				/* OOB area --> READOOB */
+				column -= mtd->writesize;
+				fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+				ctrl->oob = 1;
+			} else if (column < 256) {
+				/* First 256 bytes --> READ0 */
+				fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+			} else {
+				/* Second 256 bytes --> READ1 */
+				fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
+			}
+		}
+
+		out_be32(&lbc->fcr, fcr);
+		set_addr(mtd, column, page_addr, ctrl->oob);
+		return;
+	}
+
+	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+	case NAND_CMD_PAGEPROG: {
+		int full_page;
+		dev_vdbg(ctrl->dev,
+		         "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+		         "writing %d bytes.\n", ctrl->index);
+
+		/* if the write did not start at 0 or is not a full page
+		 * then set the exact length, otherwise use a full page
+		 * write so the HW generates the ECC.
+		 */
+		if (ctrl->oob || ctrl->column != 0 ||
+		    ctrl->index != mtd->writesize + mtd->oobsize) {
+			out_be32(&lbc->fbcr, ctrl->index);
+			full_page = 0;
+		} else {
+			out_be32(&lbc->fbcr, 0);
+			full_page = 1;
+		}
+
+		fsl_elbc_run_command(mtd);
+
+		/* Read back the page in order to fill in the ECC for the
+		 * caller.  Is this really needed?
+		 */
+		if (full_page && ctrl->oob_poi) {
+			out_be32(&lbc->fbcr, 3);
+			set_addr(mtd, 6, page_addr, 1);
+
+			ctrl->read_bytes = mtd->writesize + 9;
+
+			fsl_elbc_do_read(chip, 1);
+			fsl_elbc_run_command(mtd);
+
+			memcpy_fromio(ctrl->oob_poi + 6,
+			              &ctrl->addr[ctrl->index], 3);
+			ctrl->index += 3;
+		}
+
+		ctrl->oob_poi = NULL;
+		return;
+	}
+
+	/* CMD_STATUS must read the status byte while CEB is active */
+	/* Note - it does not wait for the ready line */
+	case NAND_CMD_STATUS:
+		out_be32(&lbc->fir,
+		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+		         (FIR_OP_RBW << FIR_OP1_SHIFT));
+		out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+		out_be32(&lbc->fbcr, 1);
+		set_addr(mtd, 0, 0, 0);
+		ctrl->read_bytes = 1;
+
+		fsl_elbc_run_command(mtd);
+
+		/* The chip always seems to report that it is
+		 * write-protected, even when it is not.
+		 */
+		setbits8(ctrl->addr, NAND_STATUS_WP);
+		return;
+
+	/* RESET without waiting for the ready line */
+	case NAND_CMD_RESET:
+		dev_dbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+		out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+		out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+		fsl_elbc_run_command(mtd);
+		return;
+
+	default:
+		dev_err(ctrl->dev,
+		        "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+		        command);
+	}
+}
+
+static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
+{
+	/* The hardware does not seem to support multiple
+	 * chips per bank.
+	 */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+	if (len < 0) {
+		dev_err(ctrl->dev, "write_buf of %d bytes", len);
+		ctrl->status = 0;
+		return;
+	}
+
+	if ((unsigned int)len > bufsize - ctrl->index) {
+		dev_err(ctrl->dev,
+		        "write_buf beyond end of buffer "
+		        "(%d requested, %u available)\n",
+		        len, bufsize - ctrl->index);
+		len = bufsize - ctrl->index;
+	}
+
+	memcpy_toio(&ctrl->addr[ctrl->index], buf, len);
+	ctrl->index += len;
+}
+
+/*
+ * Read a byte from the FCM hardware buffer if it has any data left;
+ * a read past the end of the buffer logs an error and returns ERR_BYTE.
+ */
+static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+	/* If there are still bytes in the FCM, then use the next byte. */
+	if (ctrl->index < ctrl->read_bytes)
+		return in_8(&ctrl->addr[ctrl->index++]);
+
+	dev_err(ctrl->dev, "read_byte beyond end of buffer\n");
+	return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	int avail;
+
+	if (len < 0)
+		return;
+
+	avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index);
+	memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail);
+	ctrl->index += avail;
+
+	if (len > avail)
+		dev_err(ctrl->dev,
+		        "read_buf beyond end of buffer "
+		        "(%d requested, %d available)\n",
+		        len, avail);
+}
+
+/*
+ * Verify buffer against the FCM Controller Data Buffer
+ */
+static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	int i;
+
+	if (len < 0) {
+		dev_err(ctrl->dev, "write_buf of %d bytes", len);
+		return -EINVAL;
+	}
+
+	if ((unsigned int)len > ctrl->read_bytes - ctrl->index) {
+		dev_err(ctrl->dev,
+		        "verify_buf beyond end of buffer "
+		        "(%d requested, %u available)\n",
+		        len, ctrl->read_bytes - ctrl->index);
+
+		ctrl->index = ctrl->read_bytes;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < len; i++)
+		if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i])
+			break;
+
+	ctrl->index += len;
+	return i == len && ctrl->status == LTESR_CC ? 0 : -EIO;
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+
+	if (ctrl->status != LTESR_CC)
+		return NAND_STATUS_FAIL;
+
+	/* Use READ_STATUS command, but wait for the device to be ready */
+	ctrl->use_mdr = 0;
+	out_be32(&lbc->fir,
+	         (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+	         (FIR_OP_RBW << FIR_OP1_SHIFT));
+	out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+	out_be32(&lbc->fbcr, 1);
+	set_addr(mtd, 0, 0, 0);
+	ctrl->read_bytes = 1;
+
+	fsl_elbc_run_command(mtd);
+
+	if (ctrl->status != LTESR_CC)
+		return NAND_STATUS_FAIL;
+
+	/* The chip always seems to report that it is
+	 * write-protected, even when it is not.
+	 */
+	setbits8(ctrl->addr, NAND_STATUS_WP);
+	return fsl_elbc_read_byte(mtd);
+}
+
+static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+	unsigned int al;
+
+	/* calculate FMR Address Length field */
+	al = 0;
+	if (chip->pagemask & 0xffff0000)
+		al++;
+	if (chip->pagemask & 0xff000000)
+		al++;
+
+	/* add to ECCM mode set in fsl_elbc_init */
+	priv->fmr |= (12 << FMR_CWTO_SHIFT) |  /* Timeout > 12 ms */
+	             (al << FMR_AL_SHIFT);
+
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n",
+	        chip->numchips);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %ld\n",
+	        chip->chipsize);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+	        chip->pagemask);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
+	        chip->chip_delay);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+	        chip->badblockpos);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+	        chip->chip_shift);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+	        chip->page_shift);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+	        chip->phys_erase_shift);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
+	        chip->ecclayout);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
+	        chip->ecc.mode);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+	        chip->ecc.steps);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+	        chip->ecc.bytes);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+	        chip->ecc.total);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
+	        chip->ecc.layout);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %d\n", mtd->size);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+	        mtd->erasesize);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+	        mtd->writesize);
+	dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+	        mtd->oobsize);
+
+	/* adjust Option Register and ECC to match Flash page size */
+	if (mtd->writesize == 512) {
+		priv->page_size = 0;
+		clrbits32(&lbc->bank[priv->bank].or, ~OR_FCM_PGS);
+	} else if (mtd->writesize == 2048) {
+		priv->page_size = 1;
+		setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+		/* adjust ecc setup if needed */
+		if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+		    BR_DECC_CHK_GEN) {
+			chip->ecc.size = 512;
+			chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
+			                   &fsl_elbc_oob_lp_eccm1 :
+			                   &fsl_elbc_oob_lp_eccm0;
+			mtd->ecclayout = chip->ecc.layout;
+			mtd->oobavail = chip->ecc.layout->oobavail;
+		}
+	} else {
+		dev_err(ctrl->dev,
+		        "fsl_elbc_init: page size %d is not supported\n",
+		        mtd->writesize);
+		return -1;
+	}
+
+	/* The default u-boot configuration on MPC8313ERDB causes errors;
+	 * more delay is needed.  This should be safe for other boards
+	 * as well.
+	 */
+	setbits32(&lbc->bank[priv->bank].or, 0x70);
+	return 0;
+}
+
+static int fsl_elbc_read_page(struct mtd_info *mtd,
+                              struct nand_chip *chip,
+                              uint8_t *buf)
+{
+	fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+	fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
+		mtd->ecc_stats.failed++;
+
+	return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static void fsl_elbc_write_page(struct mtd_info *mtd,
+                                struct nand_chip *chip,
+                                const uint8_t *buf)
+{
+	struct fsl_elbc_mtd *priv = chip->priv;
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	ctrl->oob_poi = chip->oob_poi;
+}
+
+static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+{
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+	struct nand_chip *chip = &priv->chip;
+
+	dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
+
+	/* Fill in fsl_elbc_mtd structure */
+	priv->mtd.priv = chip;
+	priv->mtd.owner = THIS_MODULE;
+	priv->fmr = 0; /* rest filled in later */
+
+	/* fill in nand_chip structure */
+	/* set up function call table */
+	chip->read_byte = fsl_elbc_read_byte;
+	chip->write_buf = fsl_elbc_write_buf;
+	chip->read_buf = fsl_elbc_read_buf;
+	chip->verify_buf = fsl_elbc_verify_buf;
+	chip->select_chip = fsl_elbc_select_chip;
+	chip->cmdfunc = fsl_elbc_cmdfunc;
+	chip->waitfunc = fsl_elbc_wait;
+
+	/* set up nand options */
+	chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+
+	chip->controller = &ctrl->controller;
+	chip->priv = priv;
+
+	chip->ecc.read_page = fsl_elbc_read_page;
+	chip->ecc.write_page = fsl_elbc_write_page;
+
+	/* If CS Base Register selects full hardware ECC then use it */
+	if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+	    BR_DECC_CHK_GEN) {
+		chip->ecc.mode = NAND_ECC_HW;
+		/* put in small page settings and adjust later if needed */
+		chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
+				&fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
+		chip->ecc.size = 512;
+		chip->ecc.bytes = 3;
+	} else {
+		/* otherwise fall back to default software ECC */
+		chip->ecc.mode = NAND_ECC_SOFT;
+	}
+
+	return 0;
+}
+
+static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
+{
+	struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+	nand_release(&priv->mtd);
+
+	if (priv->vbase)
+		iounmap(priv->vbase);
+
+	ctrl->chips[priv->bank] = NULL;
+	kfree(priv);
+
+	return 0;
+}
+
+static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
+                               struct device_node *node)
+{
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+	struct fsl_elbc_mtd *priv;
+	struct resource res;
+#ifdef CONFIG_MTD_PARTITIONS
+	static const char *part_probe_types[]
+		= { "cmdlinepart", "RedBoot", NULL };
+	struct mtd_partition *parts;
+#endif
+	int ret;
+	int bank;
+
+	/* get, allocate and map the memory resource */
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		dev_err(ctrl->dev, "failed to get resource\n");
+		return ret;
+	}
+
+	/* find which chip select it is connected to */
+	for (bank = 0; bank < MAX_BANKS; bank++)
+		if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
+		    (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
+		    (in_be32(&lbc->bank[bank].br) &
+		     in_be32(&lbc->bank[bank].or) & BR_BA)
+		     == res.start)
+			break;
+
+	if (bank >= MAX_BANKS) {
+		dev_err(ctrl->dev, "address did not match any chip selects\n");
+		return -ENODEV;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	ctrl->chips[bank] = priv;
+	priv->bank = bank;
+	priv->ctrl = ctrl;
+	priv->dev = ctrl->dev;
+
+	priv->vbase = ioremap(res.start, res.end - res.start + 1);
+	if (!priv->vbase) {
+		dev_err(ctrl->dev, "failed to map chip region\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = fsl_elbc_chip_init(priv);
+	if (ret)
+		goto err;
+
+	ret = nand_scan_ident(&priv->mtd, 1);
+	if (ret)
+		goto err;
+
+	ret = fsl_elbc_chip_init_tail(&priv->mtd);
+	if (ret)
+		goto err;
+
+	ret = nand_scan_tail(&priv->mtd);
+	if (ret)
+		goto err;
+
+#ifdef CONFIG_MTD_PARTITIONS
+	/* First look for a RedBoot table or partitions on the command
+	 * line; these take precedence over device tree information. */
+	ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
+	if (ret < 0)
+		goto err;
+
+#ifdef CONFIG_MTD_OF_PARTS
+	if (ret == 0) {
+		ret = of_mtd_parse_partitions(priv->dev, &priv->mtd,
+		                              node, &parts);
+		if (ret < 0)
+			goto err;
+	}
+#endif
+
+	if (ret > 0)
+		add_mtd_partitions(&priv->mtd, parts, ret);
+	else
+#endif
+		add_mtd_device(&priv->mtd);
+
+	printk(KERN_INFO "eLBC NAND device at 0x%zx, bank %d\n",
+	       res.start, priv->bank);
+	return 0;
+
+err:
+	fsl_elbc_chip_remove(priv);
+	return ret;
+}
+
+static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
+{
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+
+	/* clear event registers */
+	setbits32(&lbc->ltesr, LTESR_NAND_MASK);
+	out_be32(&lbc->lteatr, 0);
+
+	/* Enable interrupts for any detected events */
+	out_be32(&lbc->lteir, LTESR_NAND_MASK);
+
+	ctrl->read_bytes = 0;
+	ctrl->index = 0;
+	ctrl->addr = NULL;
+
+	return 0;
+}
+
+static int __devexit fsl_elbc_ctrl_remove(struct of_device *ofdev)
+{
+	struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
+	int i;
+
+	for (i = 0; i < MAX_BANKS; i++)
+		if (ctrl->chips[i])
+			fsl_elbc_chip_remove(ctrl->chips[i]);
+
+	if (ctrl->irq)
+		free_irq(ctrl->irq, ctrl);
+
+	if (ctrl->regs)
+		iounmap(ctrl->regs);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+	kfree(ctrl);
+	return 0;
+}
+
+/* NOTE: This interrupt is also used to report other localbus events,
+ * such as transaction errors on other chipselects.  If we want to
+ * capture those, we'll need to move the IRQ code into a shared
+ * LBC driver.
+ */
+
+static irqreturn_t fsl_elbc_ctrl_irq(int irqno, void *data)
+{
+	struct fsl_elbc_ctrl *ctrl = data;
+	struct elbc_regs __iomem *lbc = ctrl->regs;
+	__be32 status = in_be32(&lbc->ltesr) & LTESR_NAND_MASK;
+
+	if (status) {
+		out_be32(&lbc->ltesr, status);
+		out_be32(&lbc->lteatr, 0);
+
+		ctrl->irq_status = status;
+		smp_wmb();
+		wake_up(&ctrl->irq_wait);
+
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+/* fsl_elbc_ctrl_probe
+ *
+ * called by device layer when it finds a device matching
+ * one that our driver can handle. This code allocates all of
+ * the resources needed for the controller only.  The
+ * resources for the NAND banks themselves are allocated
+ * in the chip probe function.
+*/
+
+static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
+                                         const struct of_device_id *match)
+{
+	struct device_node *child;
+	struct fsl_elbc_ctrl *ctrl;
+	int ret;
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	dev_set_drvdata(&ofdev->dev, ctrl);
+
+	spin_lock_init(&ctrl->controller.lock);
+	init_waitqueue_head(&ctrl->controller.wq);
+	init_waitqueue_head(&ctrl->irq_wait);
+
+	ctrl->regs = of_iomap(ofdev->node, 0);
+	if (!ctrl->regs) {
+		dev_err(&ofdev->dev, "failed to get memory region\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL);
+	if (ctrl->irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "failed to get irq resource\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	ctrl->dev = &ofdev->dev;
+
+	ret = fsl_elbc_ctrl_init(ctrl);
+	if (ret < 0)
+		goto err;
+
+	ret = request_irq(ctrl->irq, fsl_elbc_ctrl_irq, 0, "fsl-elbc", ctrl);
+	if (ret != 0) {
+		dev_err(&ofdev->dev, "failed to install irq (%d)\n",
+		        ctrl->irq);
+		ret = ctrl->irq;
+		goto err;
+	}
+
+	for_each_child_of_node(ofdev->node, child)
+		if (of_device_is_compatible(child, "fsl,elbc-fcm-nand"))
+			fsl_elbc_chip_probe(ctrl, child);
+
+	return 0;
+
+err:
+	fsl_elbc_ctrl_remove(ofdev);
+	return ret;
+}
+
+static const struct of_device_id fsl_elbc_match[] = {
+	{
+		.compatible = "fsl,elbc",
+	},
+	{}
+};
+
+static struct of_platform_driver fsl_elbc_ctrl_driver = {
+	.driver = {
+		.name	= "fsl-elbc",
+	},
+	.match_table = fsl_elbc_match,
+	.probe = fsl_elbc_ctrl_probe,
+	.remove = __devexit_p(fsl_elbc_ctrl_remove),
+};
+
+static int __init fsl_elbc_init(void)
+{
+	return of_register_platform_driver(&fsl_elbc_ctrl_driver);
+}
+
+static void __exit fsl_elbc_exit(void)
+{
+	of_unregister_platform_driver(&fsl_elbc_ctrl_driver);
+}
+
+module_init(fsl_elbc_init);
+module_exit(fsl_elbc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ddd4fc019042..7acb1a0e7409 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2469,8 +2469,12 @@ int nand_scan_tail(struct mtd_info *mtd)
 			chip->ecc.write_oob = nand_write_oob_std;
 
 	case NAND_ECC_HW_SYNDROME:
-		if (!chip->ecc.calculate || !chip->ecc.correct ||
-		    !chip->ecc.hwctl) {
+		if ((!chip->ecc.calculate || !chip->ecc.correct ||
+		     !chip->ecc.hwctl) &&
+		    (!chip->ecc.read_page ||
+		     chip->ecc.read_page == nand_read_page_hwecc ||
+		     !chip->ecc.write_page ||
+		     chip->ecc.write_page == nand_write_page_hwecc)) {
 			printk(KERN_WARNING "No ECC functions supplied, "
 			       "Hardware ECC not possible\n");
 			BUG();
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
new file mode 100644
index 000000000000..9162cca0182b
--- /dev/null
+++ b/drivers/mtd/nand/orion_nand.c
@@ -0,0 +1,171 @@
+/*
+ * drivers/mtd/nand/orion_nand.c
+ *
+ * NAND support for Marvell Orion SoC platforms
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+#include <asm/arch/platform.h>
+#include <asm/arch/hardware.h>
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct nand_chip *nc = mtd->priv;
+	struct orion_nand_data *board = nc->priv;
+	u32 offs;
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		offs = (1 << board->cle);
+	else if (ctrl & NAND_ALE)
+		offs = (1 << board->ale);
+	else
+		return;
+
+	if (nc->options & NAND_BUSWIDTH_16)
+		offs <<= 1;
+
+	writeb(cmd, nc->IO_ADDR_W + offs);
+}
+
+static int __init orion_nand_probe(struct platform_device *pdev)
+{
+	struct mtd_info *mtd;
+	struct nand_chip *nc;
+	struct orion_nand_data *board;
+	void __iomem *io_base;
+	int ret = 0;
+#ifdef CONFIG_MTD_PARTITIONS
+	struct mtd_partition *partitions = NULL;
+	int num_part = 0;
+#endif
+
+	nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
+	if (!nc) {
+		printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
+		ret = -ENOMEM;
+		goto no_res;
+	}
+	mtd = (struct mtd_info *)(nc + 1);
+
+	io_base = ioremap(pdev->resource[0].start,
+			pdev->resource[0].end - pdev->resource[0].start + 1);
+	if (!io_base) {
+		printk(KERN_ERR "orion_nand: ioremap failed\n");
+		ret = -EIO;
+		goto no_res;
+	}
+
+	board = pdev->dev.platform_data;
+
+	mtd->priv = nc;
+	mtd->owner = THIS_MODULE;
+
+	nc->priv = board;
+	nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
+	nc->cmd_ctrl = orion_nand_cmd_ctrl;
+	nc->ecc.mode = NAND_ECC_SOFT;
+
+	if (board->width == 16)
+		nc->options |= NAND_BUSWIDTH_16;
+
+	platform_set_drvdata(pdev, mtd);
+
+	if (nand_scan(mtd, 1)) {
+		ret = -ENXIO;
+		goto no_dev;
+	}
+
+#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+	mtd->name = "orion_nand";
+	num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
+#endif
+	/* If cmdline partitions have been passed, let them be used */
+	if (num_part <= 0) {
+		num_part = board->nr_parts;
+		partitions = board->parts;
+	}
+
+	if (partitions && num_part > 0)
+		ret = add_mtd_partitions(mtd, partitions, num_part);
+	else
+		ret = add_mtd_device(mtd);
+#else
+	ret = add_mtd_device(mtd);
+#endif
+
+	if (ret) {
+		nand_release(mtd);
+		goto no_dev;
+	}
+
+	return 0;
+
+no_dev:
+	platform_set_drvdata(pdev, NULL);
+	iounmap(io_base);
+no_res:
+	kfree(nc);
+
+	return ret;
+}
+
+static int __devexit orion_nand_remove(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct nand_chip *nc = mtd->priv;
+
+	nand_release(mtd);
+
+	iounmap(nc->IO_ADDR_W);
+
+	kfree(nc);
+
+	return 0;
+}
+
+static struct platform_driver orion_nand_driver = {
+	.probe		= orion_nand_probe,
+	.remove		= orion_nand_remove,
+	.driver		= {
+		.name	= "orion_nand",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init orion_nand_init(void)
+{
+	return platform_driver_register(&orion_nand_driver);
+}
+
+static void __exit orion_nand_exit(void)
+{
+	platform_driver_unregister(&orion_nand_driver);
+}
+
+module_init(orion_nand_init);
+module_exit(orion_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_DESCRIPTION("NAND glue for Orion platforms");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
new file mode 100644
index 000000000000..75c899039023
--- /dev/null
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip NAND flash interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+#define LBICTRL_LPCCTL_NR		0x00004000
+#define CLE_PIN_CTL			15
+#define ALE_PIN_CTL			14
+
+static unsigned int lpcctl;
+static struct mtd_info *pasemi_nand_mtd;
+static const char driver_name[] = "pasemi-nand";
+
+static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	while (len > 0x800) {
+		memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
+		buf += 0x800;
+		len -= 0x800;
+	}
+	memcpy_fromio(buf, chip->IO_ADDR_R, len);
+}
+
+static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	while (len > 0x800) {
+		memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
+		buf += 0x800;
+		len -= 0x800;
+	}
+	memcpy_toio(chip->IO_ADDR_R, buf, len);
+}
+
+static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
+			     unsigned int ctrl)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+	else
+		out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+
+	/* Push out posted writes */
+	eieio();
+	inl(lpcctl);
+}
+
+int pasemi_device_ready(struct mtd_info *mtd)
+{
+	return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
+}
+
+static int __devinit pasemi_nand_probe(struct of_device *ofdev,
+				      const struct of_device_id *match)
+{
+	struct pci_dev *pdev;
+	struct device_node *np = ofdev->node;
+	struct resource res;
+	struct nand_chip *chip;
+	int err = 0;
+
+	err = of_address_to_resource(np, 0, &res);
+
+	if (err)
+		return -EINVAL;
+
+	/* We only support one device at the moment */
+	if (pasemi_nand_mtd)
+		return -ENODEV;
+
+	pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end);
+
+	/* Allocate memory for MTD device structure and private data */
+	pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
+				  sizeof(struct nand_chip), GFP_KERNEL);
+	if (!pasemi_nand_mtd) {
+		printk(KERN_WARNING
+		       "Unable to allocate PASEMI NAND MTD device structure\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* Get pointer to private data */
+	chip = (struct nand_chip *)&pasemi_nand_mtd[1];
+
+	/* Link the private data with the MTD structure */
+	pasemi_nand_mtd->priv = chip;
+	pasemi_nand_mtd->owner = THIS_MODULE;
+
+	chip->IO_ADDR_R = of_iomap(np, 0);
+	chip->IO_ADDR_W = chip->IO_ADDR_R;
+
+	if (!chip->IO_ADDR_R) {
+		err = -EIO;
+		goto out_mtd;
+	}
+
+	pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
+	if (!pdev) {
+		err = -ENODEV;
+		goto out_ior;
+	}
+
+	lpcctl = pci_resource_start(pdev, 0);
+
+	if (!request_region(lpcctl, 4, driver_name)) {
+		err = -EBUSY;
+		goto out_ior;
+	}
+
+	chip->cmd_ctrl = pasemi_hwcontrol;
+	chip->dev_ready = pasemi_device_ready;
+	chip->read_buf = pasemi_read_buf;
+	chip->write_buf = pasemi_write_buf;
+	chip->chip_delay = 0;
+	chip->ecc.mode = NAND_ECC_SOFT;
+
+	/* Enable the following for a flash based bad block table */
+	chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+
+	/* Scan to find existence of the device */
+	if (nand_scan(pasemi_nand_mtd, 1)) {
+		err = -ENXIO;
+		goto out_lpc;
+	}
+
+	if (add_mtd_device(pasemi_nand_mtd)) {
+		printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
+		err = -ENODEV;
+		goto out_lpc;
+	}
+
+	printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n",
+	       res.start, lpcctl);
+
+	return 0;
+
+ out_lpc:
+	release_region(lpcctl, 4);
+ out_ior:
+	iounmap(chip->IO_ADDR_R);
+ out_mtd:
+	kfree(pasemi_nand_mtd);
+ out:
+	return err;
+}
+
+static int __devexit pasemi_nand_remove(struct of_device *ofdev)
+{
+	struct nand_chip *chip;
+
+	if (!pasemi_nand_mtd)
+		return 0;
+
+	chip = pasemi_nand_mtd->priv;
+
+	/* Release resources, unregister device */
+	nand_release(pasemi_nand_mtd);
+
+	release_region(lpcctl, 4);
+
+	iounmap(chip->IO_ADDR_R);
+
+	/* Free the MTD device structure */
+	kfree(pasemi_nand_mtd);
+
+	pasemi_nand_mtd = NULL;
+
+	return 0;
+}
+
+static struct of_device_id pasemi_nand_match[] =
+{
+	{
+		.compatible   = "pasemi,localbus-nand",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, pasemi_nand_match);
+
+static struct of_platform_driver pasemi_nand_driver =
+{
+	.name		= (char*)driver_name,
+	.match_table	= pasemi_nand_match,
+	.probe		= pasemi_nand_probe,
+	.remove		= pasemi_nand_remove,
+};
+
+static int __init pasemi_nand_init(void)
+{
+	return of_register_platform_driver(&pasemi_nand_driver);
+}
+module_init(pasemi_nand_init);
+
+static void __exit pasemi_nand_exit(void)
+{
+	of_unregister_platform_driver(&pasemi_nand_driver);
+}
+module_exit(pasemi_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index cd725fc5e813..f6d5c2adc4fd 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -110,7 +110,9 @@ out:
 static int __devexit plat_nand_remove(struct platform_device *pdev)
 {
 	struct plat_nand_data *data = platform_get_drvdata(pdev);
+#ifdef CONFIG_MTD_PARTITIONS
 	struct platform_nand_data *pdata = pdev->dev.platform_data;
+#endif
 
 	nand_release(&data->mtd);
 #ifdef CONFIG_MTD_PARTITIONS
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 2bd0737572c6..9260ad947524 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -120,6 +120,8 @@ struct s3c2410_nand_info {
 	int				sel_bit;
 	int				mtd_count;
 
+	unsigned long			save_nfconf;
+
 	enum s3c_cpu_type		cpu_type;
 };
 
@@ -364,23 +366,21 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
 	    ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
 		/* calculate the bit position of the error */
 
-		bit  = (diff2 >> 2) & 1;
-		bit |= (diff2 >> 3) & 2;
-		bit |= (diff2 >> 4) & 4;
+		bit  = ((diff2 >> 3) & 1) |
+		       ((diff2 >> 4) & 2) |
+		       ((diff2 >> 5) & 4);
 
 		/* calculate the byte position of the error */
 
-		byte  = (diff1 << 1) & 0x80;
-		byte |= (diff1 << 2) & 0x40;
-		byte |= (diff1 << 3) & 0x20;
-		byte |= (diff1 << 4) & 0x10;
-
-		byte |= (diff0 >> 3) & 0x08;
-		byte |= (diff0 >> 2) & 0x04;
-		byte |= (diff0 >> 1) & 0x02;
-		byte |= (diff0 >> 0) & 0x01;
-
-		byte |= (diff2 << 8) & 0x100;
+		byte = ((diff2 << 7) & 0x100) |
+		       ((diff1 << 0) & 0x80)  |
+		       ((diff1 << 1) & 0x40)  |
+		       ((diff1 << 2) & 0x20)  |
+		       ((diff1 << 3) & 0x10)  |
+		       ((diff0 >> 4) & 0x08)  |
+		       ((diff0 >> 3) & 0x04)  |
+		       ((diff0 >> 2) & 0x02)  |
+		       ((diff0 >> 1) & 0x01);
 
 		dev_dbg(info->device, "correcting error bit %d, byte %d\n",
 			bit, byte);
@@ -399,7 +399,7 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
 	if ((diff0 & ~(1<<fls(diff0))) == 0)
 		return 1;
 
-	return 0;
+	return -1;
 }
 
 /* ECC functions
@@ -810,6 +810,16 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
 	struct s3c2410_nand_info *info = platform_get_drvdata(dev);
 
 	if (info) {
+		info->save_nfconf = readl(info->regs + S3C2410_NFCONF);
+
+		/* For the moment, we must ensure nFCE is high during
+		 * the time we are suspended. This really should be
+		 * handled by suspending the MTDs we are using, but
+		 * that is currently not the case. */
+
+		writel(info->save_nfconf | info->sel_bit,
+		       info->regs + S3C2410_NFCONF);
+
 		if (!allow_clk_stop(info))
 			clk_disable(info->clk);
 	}
@@ -820,11 +830,19 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
 static int s3c24xx_nand_resume(struct platform_device *dev)
 {
 	struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+	unsigned long nfconf;
 
 	if (info) {
 		clk_enable(info->clk);
 		s3c2410_nand_inithw(info, dev);
 
+		/* Restore the state of the nFCE line. */
+
+		nfconf = readl(info->regs + S3C2410_NFCONF);
+		nfconf &= ~info->sel_bit;
+		nfconf |= info->save_nfconf & info->sel_bit;
+		writel(nfconf, info->regs + S3C2410_NFCONF);
+
 		if (allow_clk_stop(info))
 			clk_disable(info->clk);
 	}
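
The reworked correction logic above packs the failing bit number out of bits 3, 5 and 7 of diff2, and the failing byte number out of the odd bits of diff0/diff1 plus bit 1 of diff2. The stand-alone program below is a hedged illustration in plain user-space C, not driver code: decode_diff() repeats the driver's packing verbatim, while encode_diff() merely inverts that layout for a round-trip check (it is not the S3C2410 ECC generator).

    /* Illustration only: round-trip the error-position packing used by the
     * patched s3c2410_nand_correct_data().  encode_diff() is the decoder's
     * bit layout inverted, not real hardware ECC. */
    #include <assert.h>
    #include <stdio.h>

    static void decode_diff(unsigned diff0, unsigned diff1, unsigned diff2,
                            int *byte, int *bit)
    {
            *bit  = ((diff2 >> 3) & 1) |
                    ((diff2 >> 4) & 2) |
                    ((diff2 >> 5) & 4);

            *byte = ((diff2 << 7) & 0x100) |
                    ((diff1 << 0) & 0x80)  |
                    ((diff1 << 1) & 0x40)  |
                    ((diff1 << 2) & 0x20)  |
                    ((diff1 << 3) & 0x10)  |
                    ((diff0 >> 4) & 0x08)  |
                    ((diff0 >> 3) & 0x04)  |
                    ((diff0 >> 2) & 0x02)  |
                    ((diff0 >> 1) & 0x01);
    }

    /* Each address line is carried as a complementary bit pair; set the odd
     * member of the pair when the address bit is 1, the even member when it
     * is 0, which is what the driver's 0x55 parity test expects. */
    static void encode_diff(int byte, int bit,
                            unsigned *diff0, unsigned *diff1, unsigned *diff2)
    {
            int i;

            *diff0 = *diff1 = *diff2 = 0;
            for (i = 0; i < 4; i++)                 /* byte bits 0..3 */
                    *diff0 |= 1u << (2 * i + ((byte >> i) & 1));
            for (i = 0; i < 4; i++)                 /* byte bits 4..7 */
                    *diff1 |= 1u << (2 * i + ((byte >> (i + 4)) & 1));
            *diff2 |= 1u << ((byte >> 8) & 1);      /* byte bit 8 */
            for (i = 0; i < 3; i++)                 /* column bits 0..2 */
                    *diff2 |= 1u << (2 * (i + 1) + ((bit >> i) & 1));
    }

    int main(void)
    {
            unsigned d0, d1, d2;
            int byte, bit, b, c;

            for (b = 0; b < 512; b++)
                    for (c = 0; c < 8; c++) {
                            encode_diff(b, c, &d0, &d1, &d2);
                            decode_diff(d0, d1, d2, &byte, &bit);
                            assert(byte == b && bit == c);
                    }
            printf("all 512 x 8 single-bit positions round-trip\n");
            return 0;
    }
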
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
new file mode 100644
index 000000000000..f86e06934cd8
--- /dev/null
+++ b/drivers/mtd/ofpart.c
@@ -0,0 +1,74 @@
+/*
+ * Flash partitions described by the OF (or flattened) device tree
+ *
+ * Copyright (C) 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ *   Copyright (C) 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+int __devinit of_mtd_parse_partitions(struct device *dev,
+                                      struct mtd_info *mtd,
+                                      struct device_node *node,
+                                      struct mtd_partition **pparts)
+{
+	const char *partname;
+	struct device_node *pp;
+	int nr_parts, i;
+
+	/* First count the subnodes */
+	pp = NULL;
+	nr_parts = 0;
+	while ((pp = of_get_next_child(node, pp)))
+		nr_parts++;
+
+	if (nr_parts == 0)
+		return 0;
+
+	*pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
+	if (!*pparts)
+		return -ENOMEM;
+
+	pp = NULL;
+	i = 0;
+	while ((pp = of_get_next_child(node, pp))) {
+		const u32 *reg;
+		int len;
+
+		reg = of_get_property(pp, "reg", &len);
+		if (!reg || (len != 2 * sizeof(u32))) {
+			of_node_put(pp);
+			dev_err(dev, "Invalid 'reg' on %s\n", node->full_name);
+			kfree(*pparts);
+			*pparts = NULL;
+			return -EINVAL;
+		}
+		(*pparts)[i].offset = reg[0];
+		(*pparts)[i].size = reg[1];
+
+		partname = of_get_property(pp, "label", &len);
+		if (!partname)
+			partname = of_get_property(pp, "name", &len);
+		(*pparts)[i].name = (char *)partname;
+
+		if (of_get_property(pp, "read-only", &len))
+			(*pparts)[i].mask_flags = MTD_WRITEABLE;
+
+		i++;
+	}
+
+	return nr_parts;
+}
+EXPORT_SYMBOL(of_mtd_parse_partitions);
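
of_mtd_parse_partitions() returns the number of partition subnodes it found (0 when the flash node has none) and, on success, hands the caller a freshly allocated array in *pparts that the caller owns. Below is a minimal sketch of how an OF-aware flash driver could consume it; register_of_partitions() and its arguments are invented for illustration, the prototype is assumed to be visible from the MTD partition headers touched by this series, and partition support (CONFIG_MTD_PARTITIONS) is assumed to be enabled.

    /* Sketch only, not part of the patch. */
    #include <linux/device.h>
    #include <linux/of.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    static int register_of_partitions(struct device *dev,
                                      struct device_node *np,
                                      struct mtd_info *mtd)
    {
            struct mtd_partition *parts;
            int nr_parts;

            nr_parts = of_mtd_parse_partitions(dev, mtd, np, &parts);
            if (nr_parts < 0)
                    return nr_parts;        /* e.g. malformed 'reg' property */

            if (nr_parts > 0)
                    /* the driver keeps 'parts' and kfree()s it after
                     * del_mtd_partitions() on its remove path */
                    return add_mtd_partitions(mtd, parts, nr_parts);

            /* no partition subnodes: register the bare device instead */
            if (add_mtd_device(mtd))
                    return -ENODEV;
            return 0;
    }
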
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1b0b32011415..8d7d21be1541 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/mtd/mtd.h>
@@ -170,6 +171,18 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
 }
 
 /**
+ * onenand_get_density - [DEFAULT] Get OneNAND density
+ * @param dev_id	OneNAND device ID
+ *
+ * Get OneNAND density from device ID
+ */
+static inline int onenand_get_density(int dev_id)
+{
+	int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+	return (density & ONENAND_DEVICE_DENSITY_MASK);
+}
+
+/**
  * onenand_command - [DEFAULT] Send command to OneNAND device
  * @param mtd		MTD device structure
  * @param cmd		the command to be sent
@@ -182,8 +195,7 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
 static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
 {
 	struct onenand_chip *this = mtd->priv;
-	int value, readcmd = 0, block_cmd = 0;
-	int block, page;
+	int value, block, page;
 
 	/* Address translation */
 	switch (cmd) {
@@ -198,7 +210,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 	case ONENAND_CMD_ERASE:
 	case ONENAND_CMD_BUFFERRAM:
 	case ONENAND_CMD_OTP_ACCESS:
-		block_cmd = 1;
 		block = (int) (addr >> this->erase_shift);
 		page = -1;
 		break;
@@ -240,11 +251,9 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 		value = onenand_block_address(this, block);
 		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
 
-		if (block_cmd) {
-			/* Select DataRAM for DDP */
-			value = onenand_bufferram_address(this, block);
-			this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
-		}
+		/* Select DataRAM for DDP */
+		value = onenand_bufferram_address(this, block);
+		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
 	}
 
 	if (page != -1) {
@@ -256,7 +265,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 		case ONENAND_CMD_READ:
 		case ONENAND_CMD_READOOB:
 			dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
-			readcmd = 1;
 			break;
 
 		default:
@@ -273,12 +281,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 		/* Write 'BSA, BSC' of DataRAM */
 		value = onenand_buffer_address(dataram, sectors, count);
 		this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
-
-		if (readcmd) {
-			/* Select DataRAM for DDP */
-			value = onenand_bufferram_address(this, block);
-			this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
-		}
 	}
 
 	/* Interrupt clear */
@@ -855,6 +857,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 			this->command(mtd, ONENAND_CMD_READ, from, writesize);
  			ret = this->wait(mtd, FL_READING);
  			onenand_update_bufferram(mtd, from, !ret);
+			if (ret == -EBADMSG)
+				ret = 0;
  		}
  	}
 
@@ -913,6 +917,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
  		/* Now wait for load */
  		ret = this->wait(mtd, FL_READING);
  		onenand_update_bufferram(mtd, from, !ret);
+		if (ret == -EBADMSG)
+			ret = 0;
  	}
 
 	/*
@@ -923,12 +929,12 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 	ops->retlen = read;
 	ops->oobretlen = oobread;
 
-	if (mtd->ecc_stats.failed - stats.failed)
-		return -EBADMSG;
-
 	if (ret)
 		return ret;
 
+	if (mtd->ecc_stats.failed - stats.failed)
+		return -EBADMSG;
+
 	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
 }
 
@@ -944,6 +950,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 			struct mtd_oob_ops *ops)
 {
 	struct onenand_chip *this = mtd->priv;
+	struct mtd_ecc_stats stats;
 	int read = 0, thislen, column, oobsize;
 	size_t len = ops->ooblen;
 	mtd_oob_mode_t mode = ops->mode;
@@ -977,6 +984,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 		return -EINVAL;
 	}
 
+	stats = mtd->ecc_stats;
+
 	while (read < len) {
 		cond_resched();
 
@@ -988,18 +997,16 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 		onenand_update_bufferram(mtd, from, 0);
 
 		ret = this->wait(mtd, FL_READING);
-		/* First copy data and check return value for ECC handling */
+		if (ret && ret != -EBADMSG) {
+			printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+			break;
+		}
 
 		if (mode == MTD_OOB_AUTO)
 			onenand_transfer_auto_oob(mtd, buf, column, thislen);
 		else
 			this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
 
-		if (ret) {
-			printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
-			break;
-		}
-
 		read += thislen;
 
 		if (read == len)
@@ -1016,7 +1023,14 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 	}
 
 	ops->oobretlen = read;
-	return ret;
+
+	if (ret)
+		return ret;
+
+	if (mtd->ecc_stats.failed - stats.failed)
+		return -EBADMSG;
+
+	return 0;
 }
 
 /**
@@ -1106,12 +1120,10 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
 	interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
 	ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
 
+	/* Initial bad block case: 0x2400 or 0x0400 */
 	if (ctrl & ONENAND_CTRL_ERROR) {
 		printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl);
-		/* Initial bad block case */
-		if (ctrl & ONENAND_CTRL_LOAD)
-			return ONENAND_BBT_READ_ERROR;
-		return ONENAND_BBT_READ_FATAL_ERROR;
+		return ONENAND_BBT_READ_ERROR;
 	}
 
 	if (interrupt & ONENAND_INT_READ) {
@@ -1206,7 +1218,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
 static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
 {
 	struct onenand_chip *this = mtd->priv;
-	char oobbuf[64];
+	u_char *oob_buf = this->oob_buf;
 	int status, i;
 
 	this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
@@ -1215,9 +1227,9 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
 	if (status)
 		return status;
 
-	this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+	this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
 	for (i = 0; i < mtd->oobsize; i++)
-		if (buf[i] != 0xFF && buf[i] != oobbuf[i])
+		if (buf[i] != 0xFF && buf[i] != oob_buf[i])
 			return -EBADMSG;
 
 	return 0;
@@ -1273,6 +1285,112 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
 
 #define NOTALIGNED(x)	((x & (this->subpagesize - 1)) != 0)
 
+static void onenand_panic_wait(struct mtd_info *mtd)
+{
+	struct onenand_chip *this = mtd->priv;
+	unsigned int interrupt;
+	int i;
+
+	for (i = 0; i < 2000; i++) {
+		interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+		if (interrupt & ONENAND_INT_MASTER)
+			break;
+		udelay(10);
+	}
+}
+
+/**
+ * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context
+ * @param mtd		MTD device structure
+ * @param to		offset to write to
+ * @param len		number of bytes to write
+ * @param retlen	pointer to variable to store the number of written bytes
+ * @param buf		the data to write
+ *
+ * Write with ECC
+ */
+static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+			 size_t *retlen, const u_char *buf)
+{
+	struct onenand_chip *this = mtd->priv;
+	int column, subpage;
+	int written = 0;
+	int ret = 0;
+
+	if (this->state == FL_PM_SUSPENDED)
+		return -EBUSY;
+
+	/* Wait for any existing operation to clear */
+	onenand_panic_wait(mtd);
+
+	DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n",
+	      (unsigned int) to, (int) len);
+
+	/* Initialize retlen, in case of early exit */
+	*retlen = 0;
+
+	/* Do not allow writes past end of device */
+	if (unlikely((to + len) > mtd->size)) {
+		printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n");
+		return -EINVAL;
+	}
+
+	/* Reject writes which are not page aligned */
+	if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) {
+		printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n");
+		return -EINVAL;
+	}
+
+	column = to & (mtd->writesize - 1);
+
+	/* Loop until all data is written */
+	while (written < len) {
+		int thislen = min_t(int, mtd->writesize - column, len - written);
+		u_char *wbuf = (u_char *) buf;
+
+		this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
+
+		/* Partial page write */
+		subpage = thislen < mtd->writesize;
+		if (subpage) {
+			memset(this->page_buf, 0xff, mtd->writesize);
+			memcpy(this->page_buf + column, buf, thislen);
+			wbuf = this->page_buf;
+		}
+
+		this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
+		this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
+
+		this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+
+		onenand_panic_wait(mtd);
+
+		/* In partial page write we don't update bufferram */
+		onenand_update_bufferram(mtd, to, !ret && !subpage);
+		if (ONENAND_IS_2PLANE(this)) {
+			ONENAND_SET_BUFFERRAM1(this);
+			onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
+		}
+
+		if (ret) {
+			printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret);
+			break;
+		}
+
+		written += thislen;
+
+		if (written == len)
+			break;
+
+		column = 0;
+		to += thislen;
+		buf += thislen;
+	}
+
+	*retlen = written;
+	return ret;
+}
+
 /**
  * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
  * @param mtd		MTD device structure
@@ -1419,7 +1537,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 		}
 
 		/* Only check verify write turn on */
-		ret = onenand_verify(mtd, (u_char *) wbuf, to, thislen);
+		ret = onenand_verify(mtd, buf, to, thislen);
 		if (ret) {
 			printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
 			break;
@@ -1435,9 +1553,6 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 		buf += thislen;
 	}
 
-	/* Deselect and wake up anyone waiting on the device */
-	onenand_release_device(mtd);
-
 	ops->retlen = written;
 
 	return ret;
@@ -2148,7 +2263,7 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
 
 	*retlen = 0;
 
-	density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+	density = onenand_get_density(this->device_id);
 	if (density < ONENAND_DEVICE_DENSITY_512Mb)
 		otp_pages = 20;
 	else
@@ -2299,7 +2414,8 @@ static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
 static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
 			size_t len)
 {
-	unsigned char oob_buf[64];
+	struct onenand_chip *this = mtd->priv;
+	u_char *oob_buf = this->oob_buf;
 	size_t retlen;
 	int ret;
 
@@ -2339,7 +2455,7 @@ static void onenand_check_features(struct mtd_info *mtd)
 	unsigned int density, process;
 
 	/* Lock scheme depends on density and process */
-	density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+	density = onenand_get_density(this->device_id);
 	process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
 
 	/* Lock scheme */
@@ -2388,7 +2504,7 @@ static void onenand_print_device_info(int device, int version)
         vcc = device & ONENAND_DEVICE_VCC_MASK;
         demuxed = device & ONENAND_DEVICE_IS_DEMUX;
         ddp = device & ONENAND_DEVICE_IS_DDP;
-        density = device >> ONENAND_DEVICE_DENSITY_SHIFT;
+        density = onenand_get_density(device);
         printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
                 demuxed ? "" : "Muxed ",
                 ddp ? "(DDP)" : "",
@@ -2480,7 +2596,7 @@ static int onenand_probe(struct mtd_info *mtd)
 	this->device_id = dev_id;
 	this->version_id = ver_id;
 
-	density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+	density = onenand_get_density(dev_id);
 	this->chipsize = (16 << density) << 20;
 	/* Set density mask. it is used for DDP */
 	if (ONENAND_IS_DDP(this))
@@ -2664,6 +2780,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
 	mtd->write = onenand_write;
 	mtd->read_oob = onenand_read_oob;
 	mtd->write_oob = onenand_write_oob;
+	mtd->panic_write = onenand_panic_write;
 #ifdef CONFIG_MTD_ONENAND_OTP
 	mtd->get_fact_prot_info = onenand_get_fact_prot_info;
 	mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
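
With onenand_panic_write() wired into mtd->panic_write above, a panic-context client such as mtdoops (see "Use the panic_write function when present" in this merge) can write without sleeping: onenand_panic_wait() simply polls the interrupt register instead of calling the regular wait routine. The fragment below is a hedged sketch of that call pattern, not the mtdoops code; flush_panic_record() and the in_panic flag are invented for illustration, only the two mtd_info methods come from the source.

    #include <linux/mtd/mtd.h>

    /* Sketch only: prefer the non-sleeping panic_write when the driver
     * provides one and we really are on a panic path, otherwise fall back
     * to the normal write method. */
    static int flush_panic_record(struct mtd_info *mtd, loff_t ofs, size_t len,
                                  const u_char *record, int in_panic)
    {
            size_t retlen = 0;
            int ret;

            if (in_panic && mtd->panic_write)
                    ret = mtd->panic_write(mtd, ofs, len, &retlen, record);
            else
                    ret = mtd->write(mtd, ofs, len, &retlen, record);

            if (!ret && retlen != len)
                    ret = -EIO;             /* short write */
            return ret;
    }
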
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index a61351f88ec0..47474903263c 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -59,16 +59,31 @@ static int parse_redboot_partitions(struct mtd_info *master,
 	static char nullstring[] = "unallocated";
 #endif
 
+	if ( directory < 0 ) {
+		offset = master->size + directory * master->erasesize;
+		while (master->block_isbad && 
+		       master->block_isbad(master, offset)) {
+			if (!offset) {
+			nogood:
+				printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
+				return -EIO;
+			}
+			offset -= master->erasesize;
+		}
+	} else {
+		offset = directory * master->erasesize;
+		while (master->block_isbad && 
+		       master->block_isbad(master, offset)) {
+			offset += master->erasesize;
+			if (offset == master->size)
+				goto nogood;
+		}
+	}
 	buf = vmalloc(master->erasesize);
 
 	if (!buf)
 		return -ENOMEM;
 
-	if ( directory < 0 )
-		offset = master->size + directory*master->erasesize;
-	else
-		offset = directory*master->erasesize;
-
 	printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
 	       master->name, offset);
 
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 023653977a1a..6ac81e35355c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -21,11 +21,16 @@
  */
 
 /*
- * This file includes UBI initialization and building of UBI devices. At the
- * moment UBI devices may only be added while UBI is initialized, but dynamic
- * device add/remove functionality is planned. Also, at the moment we only
- * attach UBI devices by scanning, which will become a bottleneck when flashes
- * reach certain large size. Then one may improve UBI and add other methods.
+ * This file includes UBI initialization and building of UBI devices.
+ *
+ * When UBI is initialized, it attaches all the MTD devices specified as the
+ * module load parameters or the kernel boot parameters. If no MTD devices were
+ * specified, UBI does not attach any MTD device, but they may be attached
+ * later using the "UBI control device".
+ *
+ * At the moment we only attach UBI devices by scanning, which will become a
+ * bottleneck when flashes reach a certain large size. Then one may improve UBI
+ * and add other methods, although that does not seem easy to do.
  */
 
 #include <linux/err.h>
@@ -33,7 +38,9 @@
 #include <linux/moduleparam.h>
 #include <linux/stringify.h>
 #include <linux/stat.h>
+#include <linux/miscdevice.h>
 #include <linux/log2.h>
+#include <linux/kthread.h>
 #include "ubi.h"
 
 /* Maximum length of the 'mtd=' parameter */
@@ -43,13 +50,11 @@
  * struct mtd_dev_param - MTD device parameter description data structure.
  * @name: MTD device name or number string
  * @vid_hdr_offs: VID header offset
- * @data_offs: data offset
  */
 struct mtd_dev_param
 {
 	char name[MTD_PARAM_LEN_MAX];
 	int vid_hdr_offs;
-	int data_offs;
 };
 
 /* Numbers of elements set in the @mtd_dev_param array */
@@ -58,14 +63,27 @@ static int mtd_devs = 0;
 /* MTD devices specification parameters */
 static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
 
-/* Number of UBI devices in system */
-int ubi_devices_cnt;
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class *ubi_class;
+
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
+/* UBI control character device */
+static struct miscdevice ubi_ctrl_cdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "ubi_ctrl",
+	.fops = &ubi_ctrl_cdev_operations,
+};
 
 /* All UBI devices in system */
-struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
 
-/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
-struct class *ubi_class;
+/* Serializes UBI devices creations and removals */
+DEFINE_MUTEX(ubi_devices_mutex);
+
+/* Protects @ubi_devices and @ubi->ref_count */
+static DEFINE_SPINLOCK(ubi_devices_lock);
 
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
 static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -101,38 +119,150 @@ static struct device_attribute dev_min_io_size =
 	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
 static struct device_attribute dev_bgt_enabled =
 	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_num =
+	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+
+/**
+ * ubi_get_device - get UBI device.
+ * @ubi_num: UBI device number
+ *
+ * This function returns UBI device description object for UBI device number
+ * @ubi_num, or %NULL if the device does not exist. This function increases the
+ * device reference count to prevent removal of the device. In other words, the
+ * device cannot be removed if its reference count is not zero.
+ */
+struct ubi_device *ubi_get_device(int ubi_num)
+{
+	struct ubi_device *ubi;
+
+	spin_lock(&ubi_devices_lock);
+	ubi = ubi_devices[ubi_num];
+	if (ubi) {
+		ubi_assert(ubi->ref_count >= 0);
+		ubi->ref_count += 1;
+		get_device(&ubi->dev);
+	}
+	spin_unlock(&ubi_devices_lock);
+
+	return ubi;
+}
+
+/**
+ * ubi_put_device - drop an UBI device reference.
+ * @ubi: UBI device description object
+ */
+void ubi_put_device(struct ubi_device *ubi)
+{
+	spin_lock(&ubi_devices_lock);
+	ubi->ref_count -= 1;
+	put_device(&ubi->dev);
+	spin_unlock(&ubi_devices_lock);
+}
+
+/**
+ * ubi_get_by_major - get UBI device description object by character device
+ *                    major number.
+ * @major: major number
+ *
+ * This function is similar to 'ubi_get_device()', but it searches the device
+ * by its major number.
+ */
+struct ubi_device *ubi_get_by_major(int major)
+{
+	int i;
+	struct ubi_device *ubi;
+
+	spin_lock(&ubi_devices_lock);
+	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+		ubi = ubi_devices[i];
+		if (ubi && MAJOR(ubi->cdev.dev) == major) {
+			ubi_assert(ubi->ref_count >= 0);
+			ubi->ref_count += 1;
+			get_device(&ubi->dev);
+			spin_unlock(&ubi_devices_lock);
+			return ubi;
+		}
+	}
+	spin_unlock(&ubi_devices_lock);
+
+	return NULL;
+}
+
+/**
+ * ubi_major2num - get UBI device number by character device major number.
+ * @major: major number
+ *
+ * This function searches for the UBI device with the given character device
+ * major number. If the UBI device was not found, this function returns
+ * -ENODEV, otherwise the UBI device number is returned.
+ */
+int ubi_major2num(int major)
+{
+	int i, ubi_num = -ENODEV;
+
+	spin_lock(&ubi_devices_lock);
+	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+		struct ubi_device *ubi = ubi_devices[i];
+
+		if (ubi && MAJOR(ubi->cdev.dev) == major) {
+			ubi_num = ubi->ubi_num;
+			break;
+		}
+	}
+	spin_unlock(&ubi_devices_lock);
+
+	return ubi_num;
+}
 
 /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
 static ssize_t dev_attribute_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
-	const struct ubi_device *ubi;
+	ssize_t ret;
+	struct ubi_device *ubi;
 
+	/*
+	 * The below code looks weird, but it actually makes sense. We get the
+	 * UBI device reference from the contained 'struct ubi_device'. But it
+	 * is not clear whether the device has already been removed. Indeed,
+	 * if the device was removed before we increased its reference count,
+	 * 'ubi_get_device()' will return %NULL and we fail with -ENODEV.
+	 *
+	 * Remember, 'struct ubi_device' is freed in the release function, so
+	 * we still can use 'ubi->ubi_num'.
+	 */
 	ubi = container_of(dev, struct ubi_device, dev);
+	ubi = ubi_get_device(ubi->ubi_num);
+	if (!ubi)
+		return -ENODEV;
+
 	if (attr == &dev_eraseblock_size)
-		return sprintf(buf, "%d\n", ubi->leb_size);
+		ret = sprintf(buf, "%d\n", ubi->leb_size);
 	else if (attr == &dev_avail_eraseblocks)
-		return sprintf(buf, "%d\n", ubi->avail_pebs);
+		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
 	else if (attr == &dev_total_eraseblocks)
-		return sprintf(buf, "%d\n", ubi->good_peb_count);
+		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
 	else if (attr == &dev_volumes_count)
-		return sprintf(buf, "%d\n", ubi->vol_count);
+		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
 	else if (attr == &dev_max_ec)
-		return sprintf(buf, "%d\n", ubi->max_ec);
+		ret = sprintf(buf, "%d\n", ubi->max_ec);
 	else if (attr == &dev_reserved_for_bad)
-		return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
+		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
 	else if (attr == &dev_bad_peb_count)
-		return sprintf(buf, "%d\n", ubi->bad_peb_count);
+		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
 	else if (attr == &dev_max_vol_count)
-		return sprintf(buf, "%d\n", ubi->vtbl_slots);
+		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
 	else if (attr == &dev_min_io_size)
-		return sprintf(buf, "%d\n", ubi->min_io_size);
+		ret = sprintf(buf, "%d\n", ubi->min_io_size);
 	else if (attr == &dev_bgt_enabled)
-		return sprintf(buf, "%d\n", ubi->thread_enabled);
+		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
+	else if (attr == &dev_mtd_num)
+		ret = sprintf(buf, "%d\n", ubi->mtd->index);
 	else
-		BUG();
+		ret = -EINVAL;
 
-	return 0;
+	ubi_put_device(ubi);
+	return ret;
 }
 
 /* Fake "release" method for UBI devices */
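
ubi_get_device() above pins a device by incrementing ubi->ref_count (and the embedded struct device), and ubi_detach_mtd_dev() further down refuses to destroy a device whose count is non-zero unless told to do it anyway, so every successful get must be paired with ubi_put_device(). The sketch below shows that contract, assuming the UBI-internal header; process_device() is a placeholder for whatever work the caller does.

    #include "ubi.h"        /* UBI-internal header, same directory */

    static int process_device(struct ubi_device *ubi);      /* placeholder */

    /* Sketch of the reference-counting contract, not code from the patch. */
    static int use_ubi_device(int ubi_num)
    {
            struct ubi_device *ubi;
            int err;

            ubi = ubi_get_device(ubi_num);  /* pins the device, or NULL */
            if (!ubi)
                    return -ENODEV;

            err = process_device(ubi);      /* cannot be detached meanwhile */

            ubi_put_device(ubi);            /* allow detaching again */
            return err;
    }
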
@@ -150,68 +280,44 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
 	int err;
 
 	ubi->dev.release = dev_release;
-	ubi->dev.devt = MKDEV(ubi->major, 0);
+	ubi->dev.devt = ubi->cdev.dev;
 	ubi->dev.class = ubi_class;
 	sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
 	err = device_register(&ubi->dev);
 	if (err)
-		goto out;
+		return err;
 
 	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
 	if (err)
-		goto out_unregister;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
 	if (err)
-		goto out_eraseblock_size;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
 	if (err)
-		goto out_avail_eraseblocks;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_volumes_count);
 	if (err)
-		goto out_total_eraseblocks;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_max_ec);
 	if (err)
-		goto out_volumes_count;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
 	if (err)
-		goto out_volumes_max_ec;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
 	if (err)
-		goto out_reserved_for_bad;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_max_vol_count);
 	if (err)
-		goto out_bad_peb_count;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_min_io_size);
 	if (err)
-		goto out_max_vol_count;
+		return err;
 	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
 	if (err)
-		goto out_min_io_size;
-
-	return 0;
-
-out_min_io_size:
-	device_remove_file(&ubi->dev, &dev_min_io_size);
-out_max_vol_count:
-	device_remove_file(&ubi->dev, &dev_max_vol_count);
-out_bad_peb_count:
-	device_remove_file(&ubi->dev, &dev_bad_peb_count);
-out_reserved_for_bad:
-	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
-out_volumes_max_ec:
-	device_remove_file(&ubi->dev, &dev_max_ec);
-out_volumes_count:
-	device_remove_file(&ubi->dev, &dev_volumes_count);
-out_total_eraseblocks:
-	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
-out_avail_eraseblocks:
-	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
-out_eraseblock_size:
-	device_remove_file(&ubi->dev, &dev_eraseblock_size);
-out_unregister:
-	device_unregister(&ubi->dev);
-out:
-	ubi_err("failed to initialize sysfs for %s", ubi->ubi_name);
+		return err;
+	err = device_create_file(&ubi->dev, &dev_mtd_num);
 	return err;
 }
 
@@ -221,6 +327,7 @@ out:
  */
 static void ubi_sysfs_close(struct ubi_device *ubi)
 {
+	device_remove_file(&ubi->dev, &dev_mtd_num);
 	device_remove_file(&ubi->dev, &dev_bgt_enabled);
 	device_remove_file(&ubi->dev, &dev_min_io_size);
 	device_remove_file(&ubi->dev, &dev_max_vol_count);
@@ -244,7 +351,7 @@ static void kill_volumes(struct ubi_device *ubi)
 
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		if (ubi->volumes[i])
-			ubi_free_volume(ubi, i);
+			ubi_free_volume(ubi, ubi->volumes[i]);
 }
 
 /**
@@ -259,9 +366,6 @@ static int uif_init(struct ubi_device *ubi)
 	int i, err;
 	dev_t dev;
 
-	mutex_init(&ubi->vtbl_mutex);
-	spin_lock_init(&ubi->volumes_lock);
-
 	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
 
 	/*
@@ -278,39 +382,40 @@ static int uif_init(struct ubi_device *ubi)
 		return err;
 	}
 
+	ubi_assert(MINOR(dev) == 0);
 	cdev_init(&ubi->cdev, &ubi_cdev_operations);
-	ubi->major = MAJOR(dev);
-	dbg_msg("%s major is %u", ubi->ubi_name, ubi->major);
+	dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
 	ubi->cdev.owner = THIS_MODULE;
 
-	dev = MKDEV(ubi->major, 0);
 	err = cdev_add(&ubi->cdev, dev, 1);
 	if (err) {
-		ubi_err("cannot add character device %s", ubi->ubi_name);
+		ubi_err("cannot add character device");
 		goto out_unreg;
 	}
 
 	err = ubi_sysfs_init(ubi);
 	if (err)
-		goto out_cdev;
+		goto out_sysfs;
 
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		if (ubi->volumes[i]) {
-			err = ubi_add_volume(ubi, i);
-			if (err)
+			err = ubi_add_volume(ubi, ubi->volumes[i]);
+			if (err) {
+				ubi_err("cannot add volume %d", i);
 				goto out_volumes;
+			}
 		}
 
 	return 0;
 
 out_volumes:
 	kill_volumes(ubi);
+out_sysfs:
 	ubi_sysfs_close(ubi);
-out_cdev:
 	cdev_del(&ubi->cdev);
 out_unreg:
-	unregister_chrdev_region(MKDEV(ubi->major, 0),
-				 ubi->vtbl_slots + 1);
+	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
 	return err;
 }
 
@@ -323,7 +428,7 @@ static void uif_close(struct ubi_device *ubi)
 	kill_volumes(ubi);
 	ubi_sysfs_close(ubi);
 	cdev_del(&ubi->cdev);
-	unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1);
+	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
 }
 
 /**
@@ -384,9 +489,9 @@ out_si:
  * assumed:
  *   o EC header is always at offset zero - this cannot be changed;
  *   o VID header starts just after the EC header at the closest address
- *   aligned to @io->@hdrs_min_io_size;
+ *     aligned to @io->hdrs_min_io_size;
  *   o data starts just after the VID header at the closest address aligned to
- *     @io->@min_io_size
+ *     @io->min_io_size
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
@@ -407,6 +512,9 @@ static int io_init(struct ubi_device *ubi)
 		return -EINVAL;
 	}
 
+	if (ubi->vid_hdr_offset < 0)
+		return -EINVAL;
+
 	/*
 	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
 	 * physical eraseblocks maximum.
@@ -424,7 +532,8 @@ static int io_init(struct ubi_device *ubi)
 
 	/* Make sure minimal I/O unit is power of 2 */
 	if (!is_power_of_2(ubi->min_io_size)) {
-		ubi_err("bad min. I/O unit");
+		ubi_err("min. I/O unit (%d) is not power of 2",
+			ubi->min_io_size);
 		return -EINVAL;
 	}
 
@@ -453,10 +562,8 @@ static int io_init(struct ubi_device *ubi)
 	}
 
 	/* Similar for the data offset */
-	if (ubi->leb_start == 0) {
-		ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize;
-		ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
-	}
+	ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
 
 	dbg_msg("vid_hdr_offset   %d", ubi->vid_hdr_offset);
 	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
@@ -514,76 +621,147 @@ static int io_init(struct ubi_device *ubi)
 }
 
 /**
- * attach_mtd_dev - attach an MTD device.
- * @mtd_dev: MTD device name or number string
- * @vid_hdr_offset: VID header offset
- * @data_offset: data offset
+ * autoresize - re-size the volume which has the "auto-resize" flag set.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to re-size
  *
- * This function attaches an MTD device to UBI. It first treats @mtd_dev as the
- * MTD device name, and tries to open it by this name. If it is unable to open,
- * it tries to convert @mtd_dev to an integer and open the MTD device by its
- * number. Returns zero in case of success and a negative error code in case of
- * failure.
+ * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * the volume table to the largest possible size. See comments in ubi-header.h
+ * for more description of the flag. Returns zero in case of success and a
+ * negative error code in case of failure.
  */
-static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
-			  int data_offset)
+static int autoresize(struct ubi_device *ubi, int vol_id)
 {
-	struct ubi_device *ubi;
-	struct mtd_info *mtd;
-	int i, err;
+	struct ubi_volume_desc desc;
+	struct ubi_volume *vol = ubi->volumes[vol_id];
+	int err, old_reserved_pebs = vol->reserved_pebs;
 
-	mtd = get_mtd_device_nm(mtd_dev);
-	if (IS_ERR(mtd)) {
-		int mtd_num;
-		char *endp;
+	/*
+	 * Clear the auto-resize flag in the volume in-memory copy of the
+	 * volume table, and 'ubi_resize_volume()' will propagate this change
+	 * to the flash.
+	 */
+	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
 
-		if (PTR_ERR(mtd) != -ENODEV)
-			return PTR_ERR(mtd);
+	if (ubi->avail_pebs == 0) {
+		struct ubi_vtbl_record vtbl_rec;
 
 		/*
-		 * Probably this is not MTD device name but MTD device number -
-		 * check this out.
+		 * No available PEBs to re-size the volume, clear the flag on
+		 * flash and exit.
 		 */
-		mtd_num = simple_strtoul(mtd_dev, &endp, 0);
-		if (*endp != '\0' || mtd_dev == endp) {
-			ubi_err("incorrect MTD device: \"%s\"", mtd_dev);
-			return -ENODEV;
-		}
-
-		mtd = get_mtd_device(NULL, mtd_num);
-		if (IS_ERR(mtd))
-			return PTR_ERR(mtd);
+		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
+		       sizeof(struct ubi_vtbl_record));
+		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+		if (err)
+			ubi_err("cannot clean auto-resize flag for volume %d",
+				vol_id);
+	} else {
+		desc.vol = vol;
+		err = ubi_resize_volume(&desc,
+					old_reserved_pebs + ubi->avail_pebs);
+		if (err)
+			ubi_err("cannot auto-resize volume %d", vol_id);
 	}
 
-	/* Check if we already have the same MTD device attached */
-	for (i = 0; i < ubi_devices_cnt; i++)
-		if (ubi_devices[i]->mtd->index == mtd->index) {
-			ubi_err("mtd%d is already attached to ubi%d",
+	if (err)
+		return err;
+
+	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
+		vol->name, old_reserved_pebs, vol->reserved_pebs);
+	return 0;
+}
+
+/**
+ * ubi_attach_mtd_dev - attach an MTD device.
+ * @mtd: MTD device description object
+ * @ubi_num: number to assign to the new UBI device
+ * @vid_hdr_offset: VID header offset
+ *
+ * This function attaches MTD device @mtd to UBI and assigns @ubi_num number
+ * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
+ * which case this function finds a vacant device number and assigns it
+ * automatically. Returns the new UBI device number in case of success and a
+ * negative error code in case of failure.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+{
+	struct ubi_device *ubi;
+	int i, err;
+
+	/*
+	 * Check if we already have the same MTD device attached.
+	 *
+	 * Note, this function assumes that UBI devices creations and deletions
+	 * are serialized, so it does not take the &ubi_devices_lock.
+	 */
+	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+		ubi = ubi_devices[i];
+		if (ubi && mtd->index == ubi->mtd->index) {
+			dbg_err("mtd%d is already attached to ubi%d",
 				mtd->index, i);
-			err = -EINVAL;
-			goto out_mtd;
+			return -EEXIST;
 		}
+	}
 
-	ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device),
-						     GFP_KERNEL);
-	if (!ubi) {
-		err = -ENOMEM;
-		goto out_mtd;
+	/*
+	 * Make sure this MTD device is not emulated on top of an UBI volume
+	 * already. Generally this recursion would work, but it causes problems:
+	 * for example, the UBI module would take a reference to itself by
+	 * attaching (and thus, opening) the emulated MTD device, which makes
+	 * it impossible to unload the module. And in general it makes
+	 * no sense to attach emulated MTD devices, so we prohibit this.
+	 */
+	if (mtd->type == MTD_UBIVOLUME) {
+		ubi_err("refuse attaching mtd%d - it is already emulated on "
+			"top of UBI", mtd->index);
+		return -EINVAL;
 	}
 
-	ubi->ubi_num = ubi_devices_cnt;
-	ubi->mtd = mtd;
+	if (ubi_num == UBI_DEV_NUM_AUTO) {
+		/* Search for an empty slot in the @ubi_devices array */
+		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+			if (!ubi_devices[ubi_num])
+				break;
+		if (ubi_num == UBI_MAX_DEVICES) {
+			dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
+			return -ENFILE;
+		}
+	} else {
+		if (ubi_num >= UBI_MAX_DEVICES)
+			return -EINVAL;
+
+		/* Make sure ubi_num is not busy */
+		if (ubi_devices[ubi_num]) {
+			dbg_err("ubi%d already exists", ubi_num);
+			return -EEXIST;
+		}
+	}
 
-	dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d",
-		ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset);
+	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
+	if (!ubi)
+		return -ENOMEM;
 
+	ubi->mtd = mtd;
+	ubi->ubi_num = ubi_num;
 	ubi->vid_hdr_offset = vid_hdr_offset;
-	ubi->leb_start = data_offset;
+	ubi->autoresize_vol_id = -1;
+
+	mutex_init(&ubi->buf_mutex);
+	mutex_init(&ubi->ckvol_mutex);
+	mutex_init(&ubi->volumes_mutex);
+	spin_lock_init(&ubi->volumes_lock);
+
+	dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
+		mtd->index, ubi_num, vid_hdr_offset);
+
 	err = io_init(ubi);
 	if (err)
 		goto out_free;
 
-	mutex_init(&ubi->buf_mutex);
 	ubi->peb_buf1 = vmalloc(ubi->peb_size);
 	if (!ubi->peb_buf1)
 		goto out_free;
@@ -605,12 +783,26 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
 		goto out_free;
 	}
 
+	if (ubi->autoresize_vol_id != -1) {
+		err = autoresize(ubi, ubi->autoresize_vol_id);
+		if (err)
+			goto out_detach;
+	}
+
 	err = uif_init(ubi);
 	if (err)
 		goto out_detach;
 
-	ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt);
-	ubi_msg("MTD device name:            \"%s\"", ubi->mtd->name);
+	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
+	if (IS_ERR(ubi->bgt_thread)) {
+		err = PTR_ERR(ubi->bgt_thread);
+		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
+			err);
+		goto out_uif;
+	}
+
+	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
+	ubi_msg("MTD device name:            \"%s\"", mtd->name);
 	ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20);
 	ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
 		ubi->peb_size, ubi->peb_size >> 10);
@@ -638,9 +830,11 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
 		wake_up_process(ubi->bgt_thread);
 	}
 
-	ubi_devices_cnt += 1;
-	return 0;
+	ubi_devices[ubi_num] = ubi;
+	return ubi_num;
 
+out_uif:
+	uif_close(ubi);
 out_detach:
 	ubi_eba_close(ubi);
 	ubi_wl_close(ubi);
@@ -652,21 +846,58 @@ out_free:
 	vfree(ubi->dbg_peb_buf);
 #endif
 	kfree(ubi);
-out_mtd:
-	put_mtd_device(mtd);
-	ubi_devices[ubi_devices_cnt] = NULL;
 	return err;
 }
 
 /**
- * detach_mtd_dev - detach an MTD device.
- * @ubi: UBI device description object
+ * ubi_detach_mtd_dev - detach an MTD device.
+ * @ubi_num: UBI device number to detach from
+ * @anyway: detach MTD even if device reference count is not zero
+ *
+ * This function destroys an UBI device number @ubi_num and detaches the
+ * underlying MTD device. Returns zero in case of success and %-EBUSY if the
+ * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
+ * exist.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
  */
-static void detach_mtd_dev(struct ubi_device *ubi)
+int ubi_detach_mtd_dev(int ubi_num, int anyway)
 {
-	int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index;
+	struct ubi_device *ubi;
+
+	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+		return -EINVAL;
+
+	spin_lock(&ubi_devices_lock);
+	ubi = ubi_devices[ubi_num];
+	if (!ubi) {
+		spin_unlock(&ubi_devices_lock);
+		return -EINVAL;
+	}
+
+	if (ubi->ref_count) {
+		if (!anyway) {
+			spin_unlock(&ubi_devices_lock);
+			return -EBUSY;
+		}
+		/* This may only happen if there is a bug */
+		ubi_err("%s reference count %d, destroy anyway",
+			ubi->ubi_name, ubi->ref_count);
+	}
+	ubi_devices[ubi_num] = NULL;
+	spin_unlock(&ubi_devices_lock);
 
+	ubi_assert(ubi_num == ubi->ubi_num);
 	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+
+	/*
+	 * Before freeing anything, we have to stop the background thread to
+	 * prevent it from doing anything on this device while we are freeing.
+	 */
+	if (ubi->bgt_thread)
+		kthread_stop(ubi->bgt_thread);
+
 	uif_close(ubi);
 	ubi_eba_close(ubi);
 	ubi_wl_close(ubi);
@@ -677,11 +908,37 @@ static void detach_mtd_dev(struct ubi_device *ubi)
 #ifdef CONFIG_MTD_UBI_DEBUG
 	vfree(ubi->dbg_peb_buf);
 #endif
-	kfree(ubi_devices[ubi_num]);
-	ubi_devices[ubi_num] = NULL;
-	ubi_devices_cnt -= 1;
-	ubi_assert(ubi_devices_cnt >= 0);
-	ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num);
+	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
+	kfree(ubi);
+	return 0;
+}
+
+/**
+ * open_mtd_device - open an MTD device by its name or number.
+ * @mtd_dev: name or number of the device
+ *
+ * This function tries to open the MTD device described by the @mtd_dev string:
+ * the string is first treated as an ASCII device number; if that fails, it is
+ * treated as an MTD device name. Returns the MTD device description object in
+ * case of success and a negative error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
+{
+	struct mtd_info *mtd;
+	int mtd_num;
+	char *endp;
+
+	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
+	if (*endp != '\0' || mtd_dev == endp) {
+		/*
+		 * This does not look like an ASCII integer, so it is probably
+		 * an MTD device name.
+		 */
+		mtd = get_mtd_device_nm(mtd_dev);
+	} else
+		mtd = get_mtd_device(NULL, mtd_num);
+
+	return mtd;
 }
 
 static int __init ubi_init(void)
@@ -693,47 +950,96 @@ static int __init ubi_init(void)
 	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
 
 	if (mtd_devs > UBI_MAX_DEVICES) {
-		printk("UBI error: too many MTD devices, maximum is %d\n",
-		       UBI_MAX_DEVICES);
+		printk(KERN_ERR "UBI error: too many MTD devices, "
+		       "maximum is %d\n", UBI_MAX_DEVICES);
 		return -EINVAL;
 	}
 
+	/* Create base sysfs directory and sysfs files */
 	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
-	if (IS_ERR(ubi_class))
-		return PTR_ERR(ubi_class);
+	if (IS_ERR(ubi_class)) {
+		err = PTR_ERR(ubi_class);
+		printk(KERN_ERR "UBI error: cannot create UBI class\n");
+		goto out;
+	}
 
 	err = class_create_file(ubi_class, &ubi_version);
-	if (err)
+	if (err) {
+		printk(KERN_ERR "UBI error: cannot create sysfs file\n");
 		goto out_class;
+	}
+
+	err = misc_register(&ubi_ctrl_cdev);
+	if (err) {
+		printk(KERN_ERR "UBI error: cannot register device\n");
+		goto out_version;
+	}
+
+	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+						sizeof(struct ubi_wl_entry),
+						0, 0, NULL);
+	if (!ubi_wl_entry_slab)
+		goto out_dev_unreg;
 
 	/* Attach MTD devices */
 	for (i = 0; i < mtd_devs; i++) {
 		struct mtd_dev_param *p = &mtd_dev_param[i];
+		struct mtd_info *mtd;
 
 		cond_resched();
-		err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs);
-		if (err)
+
+		mtd = open_mtd_device(p->name);
+		if (IS_ERR(mtd)) {
+			err = PTR_ERR(mtd);
+			goto out_detach;
+		}
+
+		mutex_lock(&ubi_devices_mutex);
+		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
+					 p->vid_hdr_offs);
+		mutex_unlock(&ubi_devices_mutex);
+		if (err < 0) {
+			put_mtd_device(mtd);
+			printk(KERN_ERR "UBI error: cannot attach %s\n",
+			       p->name);
 			goto out_detach;
+		}
 	}
 
 	return 0;
 
 out_detach:
 	for (k = 0; k < i; k++)
-		detach_mtd_dev(ubi_devices[k]);
+		if (ubi_devices[k]) {
+			mutex_lock(&ubi_devices_mutex);
+			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+			mutex_unlock(&ubi_devices_mutex);
+		}
+	kmem_cache_destroy(ubi_wl_entry_slab);
+out_dev_unreg:
+	misc_deregister(&ubi_ctrl_cdev);
+out_version:
 	class_remove_file(ubi_class, &ubi_version);
 out_class:
 	class_destroy(ubi_class);
+out:
+	printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err);
 	return err;
 }
 module_init(ubi_init);
 
 static void __exit ubi_exit(void)
 {
-	int i, n = ubi_devices_cnt;
+	int i;
 
-	for (i = 0; i < n; i++)
-		detach_mtd_dev(ubi_devices[i]);
+	for (i = 0; i < UBI_MAX_DEVICES; i++)
+		if (ubi_devices[i]) {
+			mutex_lock(&ubi_devices_mutex);
+			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
+			mutex_unlock(&ubi_devices_mutex);
+		}
+	kmem_cache_destroy(ubi_wl_entry_slab);
+	misc_deregister(&ubi_ctrl_cdev);
 	class_remove_file(ubi_class, &ubi_version);
 	class_destroy(ubi_class);
 }
@@ -754,7 +1060,8 @@ static int __init bytes_str_to_int(const char *str)
 
 	result = simple_strtoul(str, &endp, 0);
 	if (str == endp || result < 0) {
-		printk("UBI error: incorrect bytes count: \"%s\"\n", str);
+		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+		       str);
 		return -EINVAL;
 	}
 
@@ -764,15 +1071,14 @@ static int __init bytes_str_to_int(const char *str)
 	case 'M':
 		result *= 1024;
 	case 'K':
-	case 'k':
 		result *= 1024;
-		if (endp[1] == 'i' && (endp[2] == '\0' ||
-			  endp[2] == 'B'  || endp[2] == 'b'))
+		if (endp[1] == 'i' && endp[2] == 'B')
 			endp += 2;
 	case '\0':
 		break;
 	default:
-		printk("UBI error: incorrect bytes count: \"%s\"\n", str);
+		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+		       str);
 		return -EINVAL;
 	}
 
@@ -793,23 +1099,27 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
 	struct mtd_dev_param *p;
 	char buf[MTD_PARAM_LEN_MAX];
 	char *pbuf = &buf[0];
-	char *tokens[3] = {NULL, NULL, NULL};
+	char *tokens[2] = {NULL, NULL};
+
+	if (!val)
+		return -EINVAL;
 
 	if (mtd_devs == UBI_MAX_DEVICES) {
-		printk("UBI error: too many parameters, max. is %d\n",
+		printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
 		       UBI_MAX_DEVICES);
 		return -EINVAL;
 	}
 
 	len = strnlen(val, MTD_PARAM_LEN_MAX);
 	if (len == MTD_PARAM_LEN_MAX) {
-		printk("UBI error: parameter \"%s\" is too long, max. is %d\n",
-		       val, MTD_PARAM_LEN_MAX);
+		printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
+		       "max. is %d\n", val, MTD_PARAM_LEN_MAX);
 		return -EINVAL;
 	}
 
 	if (len == 0) {
-		printk("UBI warning: empty 'mtd=' parameter - ignored\n");
+		printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
+		       "ignored\n");
 		return 0;
 	}
 
@@ -819,11 +1129,12 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
 	if (buf[len - 1] == '\n')
 		buf[len - 1] = '\0';
 
-	for (i = 0; i < 3; i++)
+	for (i = 0; i < 2; i++)
 		tokens[i] = strsep(&pbuf, ",");
 
 	if (pbuf) {
-		printk("UBI error: too many arguments at \"%s\"\n", val);
+		printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
+		       val);
 		return -EINVAL;
 	}
 
@@ -832,13 +1143,9 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
 
 	if (tokens[1])
 		p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
-	if (tokens[2])
-		p->data_offs = bytes_str_to_int(tokens[2]);
 
 	if (p->vid_hdr_offs < 0)
 		return p->vid_hdr_offs;
-	if (p->data_offs < 0)
-		return p->data_offs;
 
 	mtd_devs += 1;
 	return 0;
@@ -846,16 +1153,15 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
 
 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
 MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
-		      "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. "
+		      "mtd=<name|num>[,<vid_hdr_offs>].\n"
 		      "Multiple \"mtd\" parameters may be specified.\n"
-		      "MTD devices may be specified by their number or name. "
-		      "Optional \"vid_hdr_offs\" and \"data_offs\" parameters "
-		      "specify UBI VID header position and data starting "
-		      "position to be used by UBI.\n"
-		      "Example: mtd=content,1984,2048 mtd=4 - attach MTD device"
-		      "with name content using VID header offset 1984 and data "
-		      "start 2048, and MTD device number 4 using default "
-		      "offsets");
+		      "MTD devices may be specified by their number or name.\n"
+		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
+		      "header position and data starting position to be used "
+		      "by UBI.\n"
+		      "Example: mtd=content,1984 mtd=4 - attach MTD device"
+		      "with name \"content\" using VID header offset 1984, and "
+		      "MTD device number 4 with default VID header offset.");
 
 MODULE_VERSION(__stringify(UBI_VERSION));
 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index fe4da1e96c52..9d6aae5449b6 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -28,6 +28,11 @@
  *
  * Major and minor numbers are assigned dynamically to both UBI and volume
  * character devices.
+ *
+ * Well, there is the third kind of character devices - the UBI control
+ * character device, which allows to manipulate by UBI devices - create and
+ * delete them. In other words, it is used for attaching and detaching MTD
+ * devices.
  */
 
 #include <linux/module.h>
@@ -39,34 +44,6 @@
 #include <asm/div64.h>
 #include "ubi.h"
 
-/*
- * Maximum sequence numbers of UBI and volume character device IOCTLs (direct
- * logical eraseblock erase is a debug-only feature).
- */
-#define UBI_CDEV_IOC_MAX_SEQ 2
-#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
-#define VOL_CDEV_IOC_MAX_SEQ 1
-#else
-#define VOL_CDEV_IOC_MAX_SEQ 2
-#endif
-
-/**
- * major_to_device - get UBI device object by character device major number.
- * @major: major number
- *
- * This function returns a pointer to the UBI device object.
- */
-static struct ubi_device *major_to_device(int major)
-{
-	int i;
-
-	for (i = 0; i < ubi_devices_cnt; i++)
-		if (ubi_devices[i] && ubi_devices[i]->major == major)
-			return ubi_devices[i];
-	BUG();
-	return NULL;
-}
-
 /**
  * get_exclusive - get exclusive access to an UBI volume.
  * @desc: volume descriptor
@@ -124,9 +101,11 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
 static int vol_cdev_open(struct inode *inode, struct file *file)
 {
 	struct ubi_volume_desc *desc;
-	const struct ubi_device *ubi = major_to_device(imajor(inode));
-	int vol_id = iminor(inode) - 1;
-	int mode;
+	int vol_id = iminor(inode) - 1, mode, ubi_num;
+
+	ubi_num = ubi_major2num(imajor(inode));
+	if (ubi_num < 0)
+		return ubi_num;
 
 	if (file->f_mode & FMODE_WRITE)
 		mode = UBI_READWRITE;
@@ -135,7 +114,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
 
 	dbg_msg("open volume %d, mode %d", vol_id, mode);
 
-	desc = ubi_open_volume(ubi->ubi_num, vol_id, mode);
+	desc = ubi_open_volume(ubi_num, vol_id, mode);
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
 
@@ -153,8 +132,15 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
 	if (vol->updating) {
 		ubi_warn("update of volume %d not finished, volume is damaged",
 			 vol->vol_id);
+		ubi_assert(!vol->changing_leb);
 		vol->updating = 0;
 		vfree(vol->upd_buf);
+	} else if (vol->changing_leb) {
+		dbg_msg("only %lld of %lld bytes received for atomic LEB change"
+			" for volume %d:%d, cancel", vol->upd_received,
+			vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
+		vol->changing_leb = 0;
+		vfree(vol->upd_buf);
 	}
 
 	ubi_close_volume(desc);
@@ -205,13 +191,13 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
 	struct ubi_volume_desc *desc = file->private_data;
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
-	int err, lnum, off, len,  vol_id = desc->vol->vol_id, tbuf_size;
+	int err, lnum, off, len,  tbuf_size;
 	size_t count_save = count;
 	void *tbuf;
 	uint64_t tmp;
 
 	dbg_msg("read %zd bytes from offset %lld of volume %d",
-		count, *offp, vol_id);
+		count, *offp, vol->vol_id);
 
 	if (vol->updating) {
 		dbg_err("updating");
@@ -225,7 +211,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
 		return 0;
 
 	if (vol->corrupted)
-		dbg_msg("read from corrupted volume %d", vol_id);
+		dbg_msg("read from corrupted volume %d", vol->vol_id);
 
 	if (*offp + count > vol->used_bytes)
 		count_save = count = vol->used_bytes - *offp;
@@ -249,7 +235,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
 		if (off + len >= vol->usable_leb_size)
 			len = vol->usable_leb_size - off;
 
-		err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0);
+		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
 		if (err)
 			break;
 
@@ -289,13 +275,13 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
 	struct ubi_volume_desc *desc = file->private_data;
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
-	int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0;
+	int lnum, off, len, tbuf_size, err = 0;
 	size_t count_save = count;
 	char *tbuf;
 	uint64_t tmp;
 
 	dbg_msg("requested: write %zd bytes to offset %lld of volume %u",
-		count, *offp, desc->vol->vol_id);
+		count, *offp, vol->vol_id);
 
 	if (vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
@@ -339,7 +325,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
 			break;
 		}
 
-		err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len,
+		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
 					UBI_UNKNOWN);
 		if (err)
 			break;
@@ -372,22 +358,32 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 
-	if (!vol->updating)
+	if (!vol->updating && !vol->changing_leb)
 		return vol_cdev_direct_write(file, buf, count, offp);
 
-	err = ubi_more_update_data(ubi, vol->vol_id, buf, count);
+	if (vol->updating)
+		err = ubi_more_update_data(ubi, vol, buf, count);
+	else
+		err = ubi_more_leb_change_data(ubi, vol, buf, count);
+
 	if (err < 0) {
-		ubi_err("cannot write %zd bytes of update data", count);
+		ubi_err("cannot accept more %zd bytes of data, error %d",
+			count, err);
 		return err;
 	}
 
 	if (err) {
 		/*
-		 * Update is finished, @err contains number of actually written
-		 * bytes now.
+		 * The operation is finished, @err contains number of actually
+		 * written bytes.
 		 */
 		count = err;
 
+		if (vol->changing_leb) {
+			revoke_exclusive(desc, UBI_READWRITE);
+			return count;
+		}
+
 		err = ubi_check_volume(ubi, vol->vol_id);
 		if (err < 0)
 			return err;
@@ -402,7 +398,6 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
 		revoke_exclusive(desc, UBI_READWRITE);
 	}
 
-	*offp += count;
 	return count;
 }
 
@@ -447,11 +442,46 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
 		if (err < 0)
 			break;
 
-		err = ubi_start_update(ubi, vol->vol_id, bytes);
+		err = ubi_start_update(ubi, vol, bytes);
 		if (bytes == 0)
 			revoke_exclusive(desc, UBI_READWRITE);
+		break;
+	}
+
+	/* Atomic logical eraseblock change command */
+	case UBI_IOCEBCH:
+	{
+		struct ubi_leb_change_req req;
+
+		err = copy_from_user(&req, argp,
+				     sizeof(struct ubi_leb_change_req));
+		if (err) {
+			err = -EFAULT;
+			break;
+		}
+
+		if (desc->mode == UBI_READONLY ||
+		    vol->vol_type == UBI_STATIC_VOLUME) {
+			err = -EROFS;
+			break;
+		}
+
+		/* Validate the request */
+		err = -EINVAL;
+		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
+			break;
+		if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
+		    req.dtype != UBI_UNKNOWN)
+			break;
+
+		err = get_exclusive(desc);
+		if (err < 0)
+			break;
 
-		file->f_pos = 0;
+		err = ubi_start_leb_change(ubi, vol, &req);
+		if (req.bytes == 0)
+			revoke_exclusive(desc, UBI_READWRITE);
 		break;
 	}
 
@@ -467,7 +497,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
 			break;
 		}
 
-		if (desc->mode == UBI_READONLY) {
+		if (desc->mode == UBI_READONLY ||
+		    vol->vol_type == UBI_STATIC_VOLUME) {
 			err = -EROFS;
 			break;
 		}
@@ -477,13 +508,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
 			break;
 		}
 
-		if (vol->vol_type != UBI_DYNAMIC_VOLUME) {
-			err = -EROFS;
-			break;
-		}
-
 		dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
-		err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum);
+		err = ubi_eba_unmap_leb(ubi, vol, lnum);
 		if (err)
 			break;
 
@@ -580,9 +606,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
 	if (!capable(CAP_SYS_RESOURCE))
 		return -EPERM;
 
-	ubi = major_to_device(imajor(inode));
-	if (IS_ERR(ubi))
-		return PTR_ERR(ubi);
+	ubi = ubi_get_by_major(imajor(inode));
+	if (!ubi)
+		return -ENODEV;
 
 	switch (cmd) {
 	/* Create volume command */
@@ -591,8 +617,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
 		struct ubi_mkvol_req req;
 
 		dbg_msg("create volume");
-		err = copy_from_user(&req, argp,
-				       sizeof(struct ubi_mkvol_req));
+		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
 		if (err) {
 			err = -EFAULT;
 			break;
@@ -604,7 +629,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
 
 		req.name[req.name_len] = '\0';
 
+		mutex_lock(&ubi->volumes_mutex);
 		err = ubi_create_volume(ubi, &req);
+		mutex_unlock(&ubi->volumes_mutex);
 		if (err)
 			break;
 
@@ -633,10 +660,16 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
 			break;
 		}
 
+		mutex_lock(&ubi->volumes_mutex);
 		err = ubi_remove_volume(desc);
-		if (err)
-			ubi_close_volume(desc);
+		mutex_unlock(&ubi->volumes_mutex);
 
+		/*
+		 * The volume is deleted (unless an error occurred), and the
+		 * 'struct ubi_volume' object will be freed when
+		 * 'ubi_close_volume()' calls 'put_device()'.
+		 */
+		ubi_close_volume(desc);
 		break;
 	}
 
@@ -648,8 +681,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
 		struct ubi_rsvol_req req;
 
 		dbg_msg("re-size volume");
-		err = copy_from_user(&req, argp,
-				       sizeof(struct ubi_rsvol_req));
+		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
 		if (err) {
 			err = -EFAULT;
 			break;
@@ -669,7 +701,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
 		pebs = !!do_div(tmp, desc->vol->usable_leb_size);
 		pebs += tmp;
 
+		mutex_lock(&ubi->volumes_mutex);
 		err = ubi_resize_volume(desc, pebs);
+		mutex_unlock(&ubi->volumes_mutex);
 		ubi_close_volume(desc);
 		break;
 	}
@@ -679,9 +713,93 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
 		break;
 	}
 
+	ubi_put_device(ubi);
 	return err;
 }
 
+static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
+			   unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	void __user *argp = (void __user *)arg;
+
+	if (!capable(CAP_SYS_RESOURCE))
+		return -EPERM;
+
+	switch (cmd) {
+	/* Attach an MTD device command */
+	case UBI_IOCATT:
+	{
+		struct ubi_attach_req req;
+		struct mtd_info *mtd;
+
+		dbg_msg("attach MTD device");
+		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
+		if (err) {
+			err = -EFAULT;
+			break;
+		}
+
+		if (req.mtd_num < 0 ||
+		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
+			err = -EINVAL;
+			break;
+		}
+
+		mtd = get_mtd_device(NULL, req.mtd_num);
+		if (IS_ERR(mtd)) {
+			err = PTR_ERR(mtd);
+			break;
+		}
+
+		/*
+		 * Note, further request verification is done by
+		 * 'ubi_attach_mtd_dev()'.
+		 */
+		mutex_lock(&ubi_devices_mutex);
+		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
+		mutex_unlock(&ubi_devices_mutex);
+		if (err < 0)
+			put_mtd_device(mtd);
+		else
+			/* @err contains UBI device number */
+			err = put_user(err, (__user int32_t *)argp);
+
+		break;
+	}
+
+	/* Detach an MTD device command */
+	case UBI_IOCDET:
+	{
+		int ubi_num;
+
+		dbg_msg("detach MTD device");
+		err = get_user(ubi_num, (__user int32_t *)argp);
+		if (err) {
+			err = -EFAULT;
+			break;
+		}
+
+		mutex_lock(&ubi_devices_mutex);
+		err = ubi_detach_mtd_dev(ubi_num, 0);
+		mutex_unlock(&ubi_devices_mutex);
+		break;
+	}
+
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	return err;
+}
+
+/* UBI control character device operations */
+struct file_operations ubi_ctrl_cdev_operations = {
+	.ioctl = ctrl_cdev_ioctl,
+	.owner = THIS_MODULE,
+};
+
 /* UBI character device operations */
 struct file_operations ubi_cdev_operations = {
 	.owner = THIS_MODULE,
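
For illustration, a minimal user-space sketch of driving the new atomic LEB change path handled by vol_cdev_ioctl() and vol_cdev_write() above. The device node name, the LEB number and the error handling are invented for the example; the ioctl name, the request fields and the follow-up write() are the ones the code above implements, and the definitions are assumed to come from <mtd/ubi-user.h>:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <mtd/ubi-user.h>	/* assumed: UBI_IOCEBCH, struct ubi_leb_change_req */

	/* Atomically replace the contents of LEB 5 of a volume (sketch only). */
	static int change_leb5(const char *data, int len)
	{
		struct ubi_leb_change_req req;
		int err, fd;

		fd = open("/dev/ubi0_0", O_RDWR);	/* illustrative device node */
		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.lnum  = 5;			/* LEB to change */
		req.bytes = len;		/* how much data will follow via write() */
		req.dtype = UBI_UNKNOWN;	/* no data type hint */

		/* Switch the volume into "changing_leb" mode (UBI_IOCEBCH above) */
		err = ioctl(fd, UBI_IOCEBCH, &req);
		if (!err && len)
			/* Routed to ubi_more_leb_change_data() by vol_cdev_write() */
			err = write(fd, data, len) == len ? 0 : -1;

		close(fd);
		return err;
	}
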
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 467722eb618b..51c40b17f1ec 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -39,8 +39,9 @@
 
 #ifdef CONFIG_MTD_UBI_DEBUG_MSG
 /* Generic debugging message */
-#define dbg_msg(fmt, ...) \
-	printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__)
+#define dbg_msg(fmt, ...)                                    \
+	printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
+	       current->pid, __FUNCTION__, ##__VA_ARGS__)
 
 #define ubi_dbg_dump_stack() dump_stack()
 
@@ -76,36 +77,28 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
 
 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
 /* Messages from the eraseblock association unit */
-#define dbg_eba(fmt, ...) \
-	printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \
-	       ##__VA_ARGS__)
+#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
 #else
 #define dbg_eba(fmt, ...) ({})
 #endif
 
 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
 /* Messages from the wear-leveling unit */
-#define dbg_wl(fmt, ...) \
-	printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \
-	       ##__VA_ARGS__)
+#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
 #else
 #define dbg_wl(fmt, ...) ({})
 #endif
 
 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
 /* Messages from the input/output unit */
-#define dbg_io(fmt, ...) \
-	printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \
-	       ##__VA_ARGS__)
+#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
 #else
 #define dbg_io(fmt, ...) ({})
 #endif
 
 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
 /* Initialization and build messages */
-#define dbg_bld(fmt, ...) \
-	printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \
-	       ##__VA_ARGS__)
+#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
 #else
 #define dbg_bld(fmt, ...) ({})
 #endif
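
To illustrate the consolidated macros, a call such as the one below (this particular call appears in the EBA unit) now expands through dbg_msg() and carries the calling pid; the pid and the numbers in the sample output are, of course, only examples:

	/* e.g. inside ubi_eba_copy_leb(): */
	dbg_eba("read %d bytes of data", aldata_size);

	/*
	 * With CONFIG_MTD_UBI_DEBUG_MSG_EBA=y this prints something like:
	 *   UBI DBG (pid 1234): ubi_eba_copy_leb: read 4096 bytes of data
	 */
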
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 880fa3690352..7ce91ca742b1 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
  * logical eraseblock it is locked for reading or writing. The per-logical
  * eraseblock locking is implemented by means of the lock tree. The lock tree
  * is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ltree_entry objects. They are indexed by
+ * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
  * (@vol_id, @lnum) pairs.
  *
  * EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
 #define EBA_RESERVED_PEBS 1
 
 /**
- * struct ltree_entry - an entry in the lock tree.
- * @rb: links RB-tree nodes
- * @vol_id: volume ID of the locked logical eraseblock
- * @lnum: locked logical eraseblock number
- * @users: how many tasks are using this logical eraseblock or wait for it
- * @mutex: read/write mutex to implement read/write access serialization to
- * the (@vol_id, @lnum) logical eraseblock
- *
- * When a logical eraseblock is being locked - corresponding &struct ltree_entry
- * object is inserted to the lock tree (@ubi->ltree).
- */
-struct ltree_entry {
-	struct rb_node rb;
-	int vol_id;
-	int lnum;
-	int users;
-	struct rw_semaphore mutex;
-};
-
-/* Slab cache for lock-tree entries */
-static struct kmem_cache *ltree_slab;
-
-/**
  * next_sqnum - get next sequence number.
  * @ubi: UBI device description object
  *
@@ -101,7 +78,7 @@ static unsigned long long next_sqnum(struct ubi_device *ubi)
  */
 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
 {
-	if (vol_id == UBI_LAYOUT_VOL_ID)
+	if (vol_id == UBI_LAYOUT_VOLUME_ID)
 		return UBI_LAYOUT_VOLUME_COMPAT;
 	return 0;
 }
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
  * @vol_id: volume ID
  * @lnum: logical eraseblock number
  *
- * This function returns a pointer to the corresponding &struct ltree_entry
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
  * object if the logical eraseblock is locked and %NULL if it is not.
  * @ubi->ltree_lock has to be locked.
  */
-static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
-					int lnum)
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+					    int lnum)
 {
 	struct rb_node *p;
 
 	p = ubi->ltree.rb_node;
 	while (p) {
-		struct ltree_entry *le;
+		struct ubi_ltree_entry *le;
 
-		le = rb_entry(p, struct ltree_entry, rb);
+		le = rb_entry(p, struct ubi_ltree_entry, rb);
 
 		if (vol_id < le->vol_id)
 			p = p->rb_left;
@@ -155,15 +132,17 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
  * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
  * failed.
  */
-static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
-					   int lnum)
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+					       int vol_id, int lnum)
 {
-	struct ltree_entry *le, *le1, *le_free;
+	struct ubi_ltree_entry *le, *le1, *le_free;
 
-	le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
+	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
 	if (!le)
 		return ERR_PTR(-ENOMEM);
 
+	le->users = 0;
+	init_rwsem(&le->mutex);
 	le->vol_id = vol_id;
 	le->lnum = lnum;
 
@@ -189,7 +168,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
 		p = &ubi->ltree.rb_node;
 		while (*p) {
 			parent = *p;
-			le1 = rb_entry(parent, struct ltree_entry, rb);
+			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
 
 			if (vol_id < le1->vol_id)
 				p = &(*p)->rb_left;
@@ -211,7 +190,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
 	spin_unlock(&ubi->ltree_lock);
 
 	if (le_free)
-		kmem_cache_free(ltree_slab, le_free);
+		kfree(le_free);
 
 	return le;
 }
@@ -227,7 +206,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
  */
 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	le = ltree_add_entry(ubi, vol_id, lnum);
 	if (IS_ERR(le))
@@ -245,7 +224,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
 	int free = 0;
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
 	up_read(&le->mutex);
 	if (free)
-		kmem_cache_free(ltree_slab, le);
+		kfree(le);
 }
 
 /**
@@ -273,7 +252,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	le = ltree_add_entry(ubi, vol_id, lnum);
 	if (IS_ERR(le))
@@ -283,6 +262,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 }
 
 /**
+ * leb_write_trylock - try to lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing if there is no
+ * contention and does nothing if there is contention. Returns %0 in case of
+ * success, %1 in case of contention, and a negative error code in case of
+ * failure.
+ */
+static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+	int free;
+	struct ubi_ltree_entry *le;
+
+	le = ltree_add_entry(ubi, vol_id, lnum);
+	if (IS_ERR(le))
+		return PTR_ERR(le);
+	if (down_write_trylock(&le->mutex))
+		return 0;
+
+	/* Contention, cancel */
+	spin_lock(&ubi->ltree_lock);
+	le->users -= 1;
+	ubi_assert(le->users >= 0);
+	if (le->users == 0) {
+		rb_erase(&le->rb, &ubi->ltree);
+		free = 1;
+	} else
+		free = 0;
+	spin_unlock(&ubi->ltree_lock);
+	if (free)
+		kfree(le);
+
+	return 1;
+}
+
+/**
  * leb_write_unlock - unlock logical eraseblock.
  * @ubi: UBI device description object
  * @vol_id: volume ID
@@ -291,7 +308,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
 	int free;
-	struct ltree_entry *le;
+	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,23 +323,23 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 
 	up_write(&le->mutex);
 	if (free)
-		kmem_cache_free(ltree_slab, le);
+		kfree(le);
 }
 
 /**
  * ubi_eba_unmap_leb - un-map logical eraseblock.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  *
  * This function un-maps logical eraseblock @lnum and schedules corresponding
  * physical eraseblock for erasure. Returns zero in case of success and a
  * negative error code in case of failure.
  */
-int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+		      int lnum)
 {
-	int idx = vol_id2idx(ubi, vol_id), err, pnum;
-	struct ubi_volume *vol = ubi->volumes[idx];
+	int err, pnum, vol_id = vol->vol_id;
 
 	if (ubi->ro_mode)
 		return -EROFS;
@@ -349,7 +366,7 @@ out_unlock:
 /**
  * ubi_eba_read_leb - read data.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: buffer to store the read data
  * @offset: offset from where to read
@@ -365,12 +382,11 @@ out_unlock:
  * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
  */
-int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
-		     int offset, int len, int check)
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+		     void *buf, int offset, int len, int check)
 {
-	int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
+	int err, pnum, scrub = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
-	struct ubi_volume *vol = ubi->volumes[idx];
 	uint32_t uninitialized_var(crc);
 
 	err = leb_read_lock(ubi, vol_id, lnum);
@@ -578,7 +594,7 @@ write_error:
 /**
  * ubi_eba_write_leb - write data to dynamic volume.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: the data to write
  * @offset: offset within the logical eraseblock where to write
@@ -586,15 +602,14 @@ write_error:
  * @dtype: data type
  *
  * This function writes data to logical eraseblock @lnum of a dynamic volume
- * @vol_id. Returns zero in case of success and a negative error code in case
+ * @vol. Returns zero in case of success and a negative error code in case
  * of failure. In case of error, it is possible that something was still
  * written to the flash media, but may be some garbage.
  */
-int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 		      const void *buf, int offset, int len, int dtype)
 {
-	int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
-	struct ubi_volume *vol = ubi->volumes[idx];
+	int err, pnum, tries = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
 
 	if (ubi->ro_mode)
@@ -613,7 +628,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
 		if (err) {
 			ubi_warn("failed to write data to PEB %d", pnum);
 			if (err == -EIO && ubi->bad_allowed)
-				err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len);
+				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+						  offset, len);
 			if (err)
 				ubi_ro_mode(ubi);
 		}
@@ -656,11 +672,14 @@ retry:
 		goto write_error;
 	}
 
-	err = ubi_io_write_data(ubi, buf, pnum, offset, len);
-	if (err) {
-		ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, "
-			 "PEB %d", len, offset, vol_id, lnum, pnum);
-		goto write_error;
+	if (len) {
+		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+		if (err) {
+			ubi_warn("failed to write %d bytes at offset %d of "
+				 "LEB %d:%d, PEB %d", len, offset, vol_id,
+				 lnum, pnum);
+			goto write_error;
+		}
 	}
 
 	vol->eba_tbl[lnum] = pnum;
@@ -698,7 +717,7 @@ write_error:
 /**
  * ubi_eba_write_leb_st - write data to static volume.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: how many bytes to write
@@ -706,7 +725,7 @@ write_error:
  * @used_ebs: how many logical eraseblocks will this volume contain
  *
  * This function writes data to logical eraseblock @lnum of static volume
- * @vol_id. The @used_ebs argument should contain total number of logical
+ * @vol. The @used_ebs argument should contain total number of logical
  * eraseblock in this static volume.
  *
  * When writing to the last logical eraseblock, the @len argument doesn't have
@@ -718,12 +737,11 @@ write_error:
  * volumes. This function returns zero in case of success and a negative error
  * code in case of failure.
  */
-int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
-			 const void *buf, int len, int dtype, int used_ebs)
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+			 int lnum, const void *buf, int len, int dtype,
+			 int used_ebs)
 {
-	int err, pnum, tries = 0, data_size = len;
-	int idx = vol_id2idx(ubi, vol_id);
-	struct ubi_volume *vol = ubi->volumes[idx];
+	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t crc;
 
@@ -819,7 +837,7 @@ write_error:
 /*
  * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: how many bytes to write
@@ -834,17 +852,27 @@ write_error:
  * UBI reserves one LEB for the "atomic LEB change" operation, so only one
  * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  */
-int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
-			      const void *buf, int len, int dtype)
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+			      int lnum, const void *buf, int len, int dtype)
 {
-	int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
-	struct ubi_volume *vol = ubi->volumes[idx];
+	int err, pnum, tries = 0, vol_id = vol->vol_id;
 	struct ubi_vid_hdr *vid_hdr;
 	uint32_t crc;
 
 	if (ubi->ro_mode)
 		return -EROFS;
 
+	if (len == 0) {
+		/*
+		 * Special case when data length is zero. In this case the LEB
+		 * has to be unmapped and mapped somewhere else.
+		 */
+		err = ubi_eba_unmap_leb(ubi, vol, lnum);
+		if (err)
+			return err;
+		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+	}
+
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
 	if (!vid_hdr)
 		return -ENOMEM;
@@ -928,20 +956,6 @@ write_error:
 }
 
 /**
- * ltree_entry_ctor - lock tree entries slab cache constructor.
- * @obj: the lock-tree entry to construct
- * @cache: the lock tree entry slab cache
- * @flags: constructor flags
- */
-static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
-{
-	struct ltree_entry *le = obj;
-
-	le->users = 0;
-	init_rwsem(&le->mutex);
-}
-
-/**
  * ubi_eba_copy_leb - copy logical eraseblock.
  * @ubi: UBI device description object
  * @from: physical eraseblock number from where to copy
@@ -950,14 +964,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
  *
  * This function copies logical eraseblock from physical eraseblock @from to
  * physical eraseblock @to. The @vid_hdr buffer may be changed by this
- * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation
- * was canceled because bit-flips were detected at the target PEB, and a
- * negative error code in case of failure.
+ * function. Returns:
+ *   o %0 in case of success;
+ *   o %1 if the operation was canceled and should be tried later (e.g.,
+ *     because a bit-flip was detected at the target PEB);
+ *   o %2 if the volume is being deleted and this LEB should not be moved;
+ *   o a negative error code in case of failure.
  */
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		     struct ubi_vid_hdr *vid_hdr)
 {
-	int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
+	int err, vol_id, lnum, data_size, aldata_size, idx;
 	struct ubi_volume *vol;
 	uint32_t crc;
 
@@ -973,51 +989,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		data_size = aldata_size =
 			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
 
-	/*
-	 * We do not want anybody to write to this logical eraseblock while we
-	 * are moving it, so we lock it.
-	 */
-	err = leb_write_lock(ubi, vol_id, lnum);
-	if (err)
-		return err;
-
-	mutex_lock(&ubi->buf_mutex);
-
-	/*
-	 * But the logical eraseblock might have been put by this time.
-	 * Cancel if it is true.
-	 */
 	idx = vol_id2idx(ubi, vol_id);
-
+	spin_lock(&ubi->volumes_lock);
 	/*
-	 * We may race with volume deletion/re-size, so we have to hold
-	 * @ubi->volumes_lock.
+	 * Note, we may race with volume deletion, which means that the volume
+	 * this logical eraseblock belongs to might be being deleted. Since the
+	 * volume deletion unmaps all the volume's logical eraseblocks, it will
+	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
 	 */
-	spin_lock(&ubi->volumes_lock);
 	vol = ubi->volumes[idx];
 	if (!vol) {
-		dbg_eba("volume %d was removed meanwhile", vol_id);
+		/* No need to do further work, cancel */
+		dbg_eba("volume %d is being removed, cancel", vol_id);
 		spin_unlock(&ubi->volumes_lock);
-		goto out_unlock;
+		return 2;
 	}
+	spin_unlock(&ubi->volumes_lock);
 
-	pnum = vol->eba_tbl[lnum];
-	if (pnum != from) {
-		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
-			"PEB %d, cancel", vol_id, lnum, from, pnum);
-		spin_unlock(&ubi->volumes_lock);
-		goto out_unlock;
+	/*
+	 * We do not want anybody to write to this logical eraseblock while we
+	 * are moving it, so lock it.
+	 *
+	 * Note, we are using non-waiting locking here, because we cannot sleep
+	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
+	 * unmapping the LEB which is mapped to the PEB we are going to move
+	 * (@from). This task locks the LEB and goes sleep in the
+	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+	 * LEB is already locked, we just do not move it and return %1.
+	 */
+	err = leb_write_trylock(ubi, vol_id, lnum);
+	if (err) {
+		dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
+		return err;
 	}
-	spin_unlock(&ubi->volumes_lock);
 
-	/* OK, now the LEB is locked and we can safely start moving it */
+	/*
+	 * The LEB might have been put meanwhile, and the task which put it is
+	 * probably waiting on @ubi->move_mutex. No need to continue the work,
+	 * cancel it.
+	 */
+	if (vol->eba_tbl[lnum] != from) {
+		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
+			"PEB %d, cancel", vol_id, lnum, from,
+			vol->eba_tbl[lnum]);
+		err = 1;
+		goto out_unlock_leb;
+	}
 
+	/*
+	 * OK, now the LEB is locked and we can safely start moving it. Since
+	 * this function utilizes the @ubi->peb_buf1 buffer which is shared
+	 * with some other functions, lock the buffer by taking the
+	 * @ubi->buf_mutex.
+	 */
+	mutex_lock(&ubi->buf_mutex);
 	dbg_eba("read %d bytes of data", aldata_size);
 	err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
 	if (err && err != UBI_IO_BITFLIPS) {
 		ubi_warn("error %d while reading data from PEB %d",
 			 err, from);
-		goto out_unlock;
+		goto out_unlock_buf;
 	}
 
 	/*
@@ -1053,7 +1085,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 
 	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
 	if (err)
-		goto out_unlock;
+		goto out_unlock_buf;
 
 	cond_resched();
 
@@ -1062,13 +1094,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	if (err) {
 		if (err != UBI_IO_BITFLIPS)
 			ubi_warn("cannot read VID header back from PEB %d", to);
-		goto out_unlock;
+		else
+			err = 1;
+		goto out_unlock_buf;
 	}
 
 	if (data_size > 0) {
 		err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
 		if (err)
-			goto out_unlock;
+			goto out_unlock_buf;
 
 		cond_resched();
 
@@ -1082,7 +1116,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 			if (err != UBI_IO_BITFLIPS)
 				ubi_warn("cannot read data back from PEB %d",
 					 to);
-			goto out_unlock;
+			else
+				err = 1;
+			goto out_unlock_buf;
 		}
 
 		cond_resched();
@@ -1090,15 +1126,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
 			ubi_warn("read data back from PEB %d - it is different",
 				 to);
-			goto out_unlock;
+			goto out_unlock_buf;
 		}
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] == from);
 	vol->eba_tbl[lnum] = to;
 
-out_unlock:
+out_unlock_buf:
 	mutex_unlock(&ubi->buf_mutex);
+out_unlock_leb:
 	leb_write_unlock(ubi, vol_id, lnum);
 	return err;
 }
@@ -1125,14 +1162,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	mutex_init(&ubi->alc_mutex);
 	ubi->ltree = RB_ROOT;
 
-	if (ubi_devices_cnt == 0) {
-		ltree_slab = kmem_cache_create("ubi_ltree_slab",
-					       sizeof(struct ltree_entry), 0,
-					       0, &ltree_entry_ctor);
-		if (!ltree_slab)
-			return -ENOMEM;
-	}
-
 	ubi->global_sqnum = si->max_sqnum + 1;
 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
 
@@ -1168,6 +1197,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		}
 	}
 
+	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+		ubi_err("not enough physical eraseblocks (%d, need %d)",
+			ubi->avail_pebs, EBA_RESERVED_PEBS);
+		err = -ENOSPC;
+		goto out_free;
+	}
+	ubi->avail_pebs -= EBA_RESERVED_PEBS;
+	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
 	if (ubi->bad_allowed) {
 		ubi_calculate_reserved(ubi);
 
@@ -1184,15 +1222,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi->rsvd_pebs  += ubi->beb_rsvd_pebs;
 	}
 
-	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
-		ubi_err("no enough physical eraseblocks (%d, need %d)",
-			ubi->avail_pebs, EBA_RESERVED_PEBS);
-		err = -ENOSPC;
-		goto out_free;
-	}
-	ubi->avail_pebs -= EBA_RESERVED_PEBS;
-	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
-
 	dbg_eba("EBA unit is initialized");
 	return 0;
 
@@ -1202,8 +1231,6 @@ out_free:
 			continue;
 		kfree(ubi->volumes[i]->eba_tbl);
 	}
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(ltree_slab);
 	return err;
 }
 
@@ -1222,6 +1249,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
 			continue;
 		kfree(ubi->volumes[i]->eba_tbl);
 	}
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(ltree_slab);
 }
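
The non-waiting locking added above follows a generic try-lock/back-off shape. A stand-alone sketch of that convention is given below; struct object and try_move_object() are invented for illustration and are not UBI API, but the return convention mirrors leb_write_trylock()/ubi_eba_copy_leb():

	#include <linux/rwsem.h>

	struct object {
		struct rw_semaphore lock;
		/* ... payload being moved ... */
	};

	/*
	 * Returns 0 if the move was done and 1 on contention, in which case
	 * the caller re-schedules the work later instead of sleeping on the
	 * lock (sleeping here could deadlock with a path that already holds
	 * the lock and waits for this worker).
	 */
	static int try_move_object(struct object *obj)
	{
		if (!down_write_trylock(&obj->lock))
			return 1;	/* somebody holds it; back off, do not sleep */

		/* ... copy the data while writers are excluded ... */

		up_write(&obj->lock);
		return 0;
	}
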
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 41ff74c60e14..d397219238d3 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -129,8 +129,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
 		if (to_read > total_read)
 			to_read = total_read;
 
-		err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs,
-				       to_read, 0);
+		err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
 		if (err)
 			break;
 
@@ -187,8 +186,8 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
 		if (to_write > total_written)
 			to_write = total_written;
 
-		err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs,
-					to_write, UBI_UNKNOWN);
+		err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
+					UBI_UNKNOWN);
 		if (err)
 			break;
 
@@ -237,7 +236,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
 		return -EROFS;
 
 	for (i = 0; i < count; i++) {
-		err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i);
+		err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
 		if (err)
 			goto out_err;
 	}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 7c304eec78b5..db3efdef2433 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -173,6 +173,16 @@ retry:
 		ubi_err("error %d while reading %d bytes from PEB %d:%d, "
 			"read %zd bytes", err, len, pnum, offset, read);
 		ubi_dbg_dump_stack();
+
+		/*
+		 * The driver should never return -EBADMSG if it failed to read
+		 * all the requested data. But some buggy drivers might do
+		 * this, so we change it to -EIO.
+		 */
+		if (read != len && err == -EBADMSG) {
+			ubi_assert(0);
+			err = -EIO;
+		}
 	} else {
 		ubi_assert(len == read);
 
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 03c774f41549..a70d58823f8d 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -30,23 +30,27 @@
  * @ubi_num: UBI device number
  * @di: the information is stored here
  *
- * This function returns %0 in case of success and a %-ENODEV if there is no
- * such UBI device.
+ * This function returns %0 in case of success, %-EINVAL if the UBI device
+ * number is invalid, and %-ENODEV if there is no such UBI device.
  */
 int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
 {
-	const struct ubi_device *ubi;
+	struct ubi_device *ubi;
+
+	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+		return -EINVAL;
 
-	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES ||
-	    !ubi_devices[ubi_num])
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
 		return -ENODEV;
 
-	ubi = ubi_devices[ubi_num];
 	di->ubi_num = ubi->ubi_num;
 	di->leb_size = ubi->leb_size;
 	di->min_io_size = ubi->min_io_size;
 	di->ro_mode = ubi->ro_mode;
-	di->cdev = MKDEV(ubi->major, 0);
+	di->cdev = ubi->cdev.dev;
+
+	ubi_put_device(ubi);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ubi_get_device_info);
@@ -73,7 +77,7 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
 	vi->usable_leb_size = vol->usable_leb_size;
 	vi->name_len = vol->name_len;
 	vi->name = vol->name;
-	vi->cdev = MKDEV(ubi->major, vi->vol_id + 1);
+	vi->cdev = vol->cdev.dev;
 }
 EXPORT_SYMBOL_GPL(ubi_get_volume_info);
 
@@ -104,37 +108,39 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
 
 	dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
 
-	err = -ENODEV;
-	if (ubi_num < 0)
-		return ERR_PTR(err);
-
-	ubi = ubi_devices[ubi_num];
-
-	if (!try_module_get(THIS_MODULE))
-		return ERR_PTR(err);
-
-	if (ubi_num >= UBI_MAX_DEVICES || !ubi)
-		goto out_put;
+	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+		return ERR_PTR(-EINVAL);
 
-	err = -EINVAL;
-	if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
-		goto out_put;
 	if (mode != UBI_READONLY && mode != UBI_READWRITE &&
 	    mode != UBI_EXCLUSIVE)
-		goto out_put;
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * First of all, we have to get the UBI device to prevent its removal.
+	 */
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
+		return ERR_PTR(-ENODEV);
+
+	if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+		err = -EINVAL;
+		goto out_put_ubi;
+	}
 
 	desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
 	if (!desc) {
 		err = -ENOMEM;
-		goto out_put;
+		goto out_put_ubi;
 	}
 
+	err = -ENODEV;
+	if (!try_module_get(THIS_MODULE))
+		goto out_free;
+
 	spin_lock(&ubi->volumes_lock);
 	vol = ubi->volumes[vol_id];
-	if (!vol) {
-		err = -ENODEV;
+	if (!vol)
 		goto out_unlock;
-	}
 
 	err = -EBUSY;
 	switch (mode) {
@@ -156,21 +162,19 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
 		vol->exclusive = 1;
 		break;
 	}
+	get_device(&vol->dev);
+	vol->ref_count += 1;
 	spin_unlock(&ubi->volumes_lock);
 
 	desc->vol = vol;
 	desc->mode = mode;
 
-	/*
-	 * To prevent simultaneous checks of the same volume we use @vtbl_mutex,
-	 * although it is not the purpose it was introduced for.
-	 */
-	mutex_lock(&ubi->vtbl_mutex);
+	mutex_lock(&ubi->ckvol_mutex);
 	if (!vol->checked) {
 		/* This is the first open - check the volume */
 		err = ubi_check_volume(ubi, vol_id);
 		if (err < 0) {
-			mutex_unlock(&ubi->vtbl_mutex);
+			mutex_unlock(&ubi->ckvol_mutex);
 			ubi_close_volume(desc);
 			return ERR_PTR(err);
 		}
@@ -181,14 +185,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
 		}
 		vol->checked = 1;
 	}
-	mutex_unlock(&ubi->vtbl_mutex);
+	mutex_unlock(&ubi->ckvol_mutex);
+
 	return desc;
 
 out_unlock:
 	spin_unlock(&ubi->volumes_lock);
-	kfree(desc);
-out_put:
 	module_put(THIS_MODULE);
+out_free:
+	kfree(desc);
+out_put_ubi:
+	ubi_put_device(ubi);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -205,8 +212,8 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
 					   int mode)
 {
 	int i, vol_id = -1, len;
-	struct ubi_volume_desc *ret;
 	struct ubi_device *ubi;
+	struct ubi_volume_desc *ret;
 
 	dbg_msg("open volume %s, mode %d", name, mode);
 
@@ -217,14 +224,12 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
 	if (len > UBI_VOL_NAME_MAX)
 		return ERR_PTR(-EINVAL);
 
-	ret = ERR_PTR(-ENODEV);
-	if (!try_module_get(THIS_MODULE))
-		return ret;
-
-	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num])
-		goto out_put;
+	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+		return ERR_PTR(-EINVAL);
 
-	ubi = ubi_devices[ubi_num];
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
+		return ERR_PTR(-ENODEV);
 
 	spin_lock(&ubi->volumes_lock);
 	/* Walk all volumes of this UBI device */
@@ -238,13 +243,16 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
 	}
 	spin_unlock(&ubi->volumes_lock);
 
-	if (vol_id < 0)
-		goto out_put;
+	if (vol_id >= 0)
+		ret = ubi_open_volume(ubi_num, vol_id, mode);
+	else
+		ret = ERR_PTR(-ENODEV);
 
-	ret = ubi_open_volume(ubi_num, vol_id, mode);
-
-out_put:
-	module_put(THIS_MODULE);
+	/*
+	 * We should put the UBI device even in case of success, because
+	 * 'ubi_open_volume()' took a reference as well.
+	 */
+	ubi_put_device(ubi);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
@@ -256,10 +264,11 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
 void ubi_close_volume(struct ubi_volume_desc *desc)
 {
 	struct ubi_volume *vol = desc->vol;
+	struct ubi_device *ubi = vol->ubi;
 
 	dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
 
-	spin_lock(&vol->ubi->volumes_lock);
+	spin_lock(&ubi->volumes_lock);
 	switch (desc->mode) {
 	case UBI_READONLY:
 		vol->readers -= 1;
@@ -270,9 +279,12 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
 	case UBI_EXCLUSIVE:
 		vol->exclusive = 0;
 	}
-	spin_unlock(&vol->ubi->volumes_lock);
+	vol->ref_count -= 1;
+	spin_unlock(&ubi->volumes_lock);
 
 	kfree(desc);
+	put_device(&vol->dev);
+	ubi_put_device(ubi);
 	module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL_GPL(ubi_close_volume);
@@ -332,7 +344,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
 	if (len == 0)
 		return 0;
 
-	err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check);
+	err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
 	if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
 		ubi_warn("mark volume %d as corrupted", vol_id);
 		vol->corrupted = 1;
@@ -399,7 +411,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
 	if (len == 0)
 		return 0;
 
-	return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype);
+	return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_write);
 
@@ -448,7 +460,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
 	if (len == 0)
 		return 0;
 
-	return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype);
+	return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_change);
 
@@ -468,9 +480,9 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
 {
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
-	int err, vol_id = vol->vol_id;
+	int err;
 
-	dbg_msg("erase LEB %d:%d", vol_id, lnum);
+	dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
 
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
@@ -481,7 +493,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
 	if (vol->upd_marker)
 		return -EBADF;
 
-	err = ubi_eba_unmap_leb(ubi, vol_id, lnum);
+	err = ubi_eba_unmap_leb(ubi, vol, lnum);
 	if (err)
 		return err;
 
@@ -529,9 +541,8 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
 {
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
-	int vol_id = vol->vol_id;
 
-	dbg_msg("unmap LEB %d:%d", vol_id, lnum);
+	dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
 
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
@@ -542,11 +553,55 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
 	if (vol->upd_marker)
 		return -EBADF;
 
-	return ubi_eba_unmap_leb(ubi, vol_id, lnum);
+	return ubi_eba_unmap_leb(ubi, vol, lnum);
 }
 EXPORT_SYMBOL_GPL(ubi_leb_unmap);
 
 /**
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ * @dtype: expected data type
+ *
+ * This function maps an un-mapped logical eraseblock @lnum to a physical
+ * eraseblock. This means that after a successful invocation of this
+ * function the logical eraseblock @lnum will be empty (contain only %0xFF
+ * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
+ * happens.
+ *
+ * This function returns zero in case of success, %-EBADF if the volume is
+ * damaged because of an interrupted update, %-EBADMSG if the logical
+ * eraseblock is already mapped, and other negative error codes in case of
+ * other failures.
+ */
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+{
+	struct ubi_volume *vol = desc->vol;
+	struct ubi_device *ubi = vol->ubi;
+
+	dbg_msg("map LEB %d:%d", vol->vol_id, lnum);
+
+	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+		return -EROFS;
+
+	if (lnum < 0 || lnum >= vol->reserved_pebs)
+		return -EINVAL;
+
+	if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
+	    dtype != UBI_UNKNOWN)
+		return -EINVAL;
+
+	if (vol->upd_marker)
+		return -EBADF;
+
+	if (vol->eba_tbl[lnum] >= 0)
+		return -EBADMSG;
+
+	return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_map);
+
+/**
  * ubi_is_mapped - check if logical eraseblock is mapped.
  * @desc: volume descriptor
  * @lnum: logical eraseblock number
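
A sketch of how an in-kernel client might use the volume API exported above. The device and volume numbers, the buffer handling and the function name are illustrative; the UBI calls themselves are the ones defined in kapi.c, with prototypes assumed to come from <linux/mtd/ubi.h>:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/mtd/ubi.h>	/* assumed to carry the exported prototypes */

	static int example_use_volume(void)
	{
		struct ubi_volume_desc *desc;
		struct ubi_device_info di;
		void *buf;
		int err;

		err = ubi_get_device_info(0, &di);	/* UBI device 0 */
		if (err)
			return err;

		desc = ubi_open_volume(0, 0, UBI_READWRITE);	/* volume 0 */
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		/* Make sure LEB 3 is mapped even before anything is written to it */
		err = ubi_leb_map(desc, 3, UBI_UNKNOWN);
		if (err && err != -EBADMSG)	/* -EBADMSG: already mapped, fine here */
			goto out;

		/*
		 * Atomically replace LEB 4; a min_io_size-sized buffer keeps the
		 * length aligned to the flash I/O unit.
		 */
		buf = kzalloc(di.min_io_size, GFP_KERNEL);
		if (!buf) {
			err = -ENOMEM;
			goto out;
		}
		err = ubi_leb_change(desc, 4, buf, di.min_io_size, UBI_UNKNOWN);
		kfree(buf);
	out:
		ubi_close_volume(desc);
		return err;
	}
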
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 9e2338c8e2cf..93e052812012 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -79,7 +79,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
 		else
 			size = vol->usable_leb_size;
 
-		err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1);
+		err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
 		if (err) {
 			if (err == -EBADMSG)
 				err = 1;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c7b0afc9d280..05aa3e7daba1 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -286,9 +286,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
 		 * FIXME: but this is anyway obsolete and will be removed at
 		 * some point.
 		 */
-
 		dbg_bld("using old crappy leb_ver stuff");
 
+		if (v1 == v2) {
+			ubi_err("PEB %d and PEB %d have the same version %lld",
+				seb->pnum, pnum, v1);
+			return -EINVAL;
+		}
+
 		abs = v1 - v2;
 		if (abs < 0)
 			abs = -abs;
@@ -390,7 +395,6 @@ out_free_buf:
 	vfree(buf);
 out_free_vidh:
 	ubi_free_vid_hdr(ubi, vh);
-	ubi_assert(err < 0);
 	return err;
 }
 
@@ -769,7 +773,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
  */
 static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
 {
-	long long ec;
+	long long uninitialized_var(ec);
 	int err, bitflips = 0, vol_id, ec_corr = 0;
 
 	dbg_bld("scan PEB %d", pnum);
@@ -854,7 +858,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
 	}
 
 	vol_id = be32_to_cpu(vidh->vol_id);
-	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) {
+	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
 		int lnum = be32_to_cpu(vidh->lnum);
 
 		/* Unsupported internal volume */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5e941a633030..457710615261 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -94,8 +94,43 @@ enum {
 	UBI_IO_BITFLIPS
 };
 
-extern int ubi_devices_cnt;
-extern struct ubi_device *ubi_devices[];
+/**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @rb: link in the corresponding RB-tree
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL unit. Each physical eraseblock has a
+ * corresponding &struct ubi_wl_entry object which may be kept in different
+ * RB-trees. See WL unit for details.
+ */
+struct ubi_wl_entry {
+	struct rb_node rb;
+	int ec;
+	int pnum;
+};
+
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or wait for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ *         the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA unit to implement per-LEB locking.
+ * When a logical eraseblock is locked, the corresponding
+ * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree).
+ * See EBA unit for details.
+ */
+struct ubi_ltree_entry {
+	struct rb_node rb;
+	int vol_id;
+	int lnum;
+	int users;
+	struct rw_semaphore mutex;
+};
 
 struct ubi_volume_desc;
 
@@ -105,11 +140,10 @@ struct ubi_volume_desc;
  * @cdev: character device object to create character device
  * @ubi: reference to the UBI device description object
  * @vol_id: volume ID
+ * @ref_count: volume reference count
  * @readers: number of users holding this volume in read-only mode
  * @writers: number of users holding this volume in read-write mode
  * @exclusive: whether somebody holds this volume in exclusive mode
- * @removed: if the volume was removed
- * @checked: if this static volume was checked
  *
  * @reserved_pebs: how many physical eraseblocks are reserved for this volume
  * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
@@ -117,21 +151,30 @@ struct ubi_volume_desc;
  * @used_ebs: how many logical eraseblocks in this volume contain data
  * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
  * @used_bytes: how many bytes of data this volume contains
- * @upd_marker: non-zero if the update marker is set for this volume
- * @corrupted: non-zero if the volume is corrupted (static volumes only)
  * @alignment: volume alignment
  * @data_pad: how many bytes are not used at the end of physical eraseblocks to
- * satisfy the requested alignment
+ *            satisfy the requested alignment
  * @name_len: volume name length
  * @name: volume name
  *
- * @updating: whether the volume is being updated
  * @upd_ebs: how many eraseblocks are expected to be updated
- * @upd_bytes: how many bytes are expected to be received
- * @upd_received: how many update bytes were already received
- * @upd_buf: update buffer which is used to collect update data
+ * @ch_lnum: LEB number which is being changed by the atomic LEB change
+ *           operation
+ * @ch_dtype: data persistency type which is being changed by the atomic LEB
+ *            change operation
+ * @upd_bytes: how many bytes are expected to be received for volume update or
+ *             atomic LEB change
+ * @upd_received: how many bytes were already received for volume update or
+ *                atomic LEB change
+ * @upd_buf: update buffer which is used to collect update data or data for
+ *           atomic LEB change
  *
  * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
+ * @checked: %1 if this static volume was checked
+ * @corrupted: %1 if the volume is corrupted (static volumes only)
+ * @upd_marker: %1 if the update marker is set for this volume
+ * @updating: %1 if the volume is being updated
+ * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
  *
  * @gluebi_desc: gluebi UBI volume descriptor
  * @gluebi_refcount: reference count of the gluebi MTD device
@@ -150,11 +193,10 @@ struct ubi_volume {
 	struct cdev cdev;
 	struct ubi_device *ubi;
 	int vol_id;
+	int ref_count;
 	int readers;
 	int writers;
 	int exclusive;
-	int removed;
-	int checked;
 
 	int reserved_pebs;
 	int vol_type;
@@ -162,23 +204,31 @@ struct ubi_volume {
 	int used_ebs;
 	int last_eb_bytes;
 	long long used_bytes;
-	int upd_marker;
-	int corrupted;
 	int alignment;
 	int data_pad;
 	int name_len;
 	char name[UBI_VOL_NAME_MAX+1];
 
-	int updating;
 	int upd_ebs;
+	int ch_lnum;
+	int ch_dtype;
 	long long upd_bytes;
 	long long upd_received;
 	void *upd_buf;
 
 	int *eba_tbl;
+	int checked:1;
+	int corrupted:1;
+	int upd_marker:1;
+	int updating:1;
+	int changing_leb:1;
 
 #ifdef CONFIG_MTD_UBI_GLUEBI
-	/* Gluebi-related stuff may be compiled out */
+	/*
+	 * Gluebi-related stuff may be compiled out.
+	 * TODO: this should not be built into UBI but should be a separate
+	 * ubimtd driver which works on top of UBI and emulates MTD devices.
+	 */
 	struct ubi_volume_desc *gluebi_desc;
 	int gluebi_refcount;
 	struct mtd_info gluebi_mtd;
@@ -200,28 +250,31 @@ struct ubi_wl_entry;
 
 /**
  * struct ubi_device - UBI device description structure
- * @dev: class device object to use the the Linux device model
+ * @dev: UBI device object to use with the Linux device model
  * @cdev: character device object to create character device
  * @ubi_num: UBI device number
  * @ubi_name: UBI device name
- * @major: character device major number
  * @vol_count: number of volumes in this UBI device
  * @volumes: volumes of this UBI device
  * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
- * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers,
- * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and
- * @vol->eba_tbl.
+ *                @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
+ *                @vol->readers, @vol->writers, @vol->exclusive,
+ *                @vol->ref_count, @vol->mapping and @vol->eba_tbl.
+ * @ref_count: count of references on the UBI device
  *
  * @rsvd_pebs: count of reserved physical eraseblocks
  * @avail_pebs: count of available physical eraseblocks
  * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
- * handling
+ *                 handling
  * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
  *
+ * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
+ *                     of UBI initialization
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
- * @vtbl_mutex: protects on-flash volume table
+ * @volumes_mutex: protects on-flash volume table and serializes volume
+ *                 changes, like creation, deletion, update, resize
  *
  * @max_ec: current highest erase counter value
  * @mean_ec: current mean erase counter value
@@ -238,15 +291,15 @@ struct ubi_wl_entry;
  * @prot.pnum: protection tree indexed by physical eraseblock numbers
  * @prot.aec: protection tree indexed by absolute erase counter value
  * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
- * fields
+ *           @move_to, @move_to_put, @erase_pending, @wl_scheduled, and @works
+ *           fields
+ * @move_mutex: serializes eraseblock moves
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
  * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
- * physical eraseblock
+ *             physical eraseblock
  * @abs_ec: absolute erase counter
  * @move_from: physical eraseblock from where the data is being moved
  * @move_to: physical eraseblock where the data is being moved to
- * @move_from_put: if the "from" PEB was put
  * @move_to_put: if the "to" PEB was put
  * @works: list of pending works
  * @works_count: count of pending works
@@ -273,13 +326,13 @@ struct ubi_wl_entry;
  * @hdrs_min_io_size
  * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
  * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
- * not
+ *               not
  * @mtd: MTD device descriptor
  *
  * @peb_buf1: a buffer of PEB size used for different purposes
  * @peb_buf2: another buffer of PEB size used for different purposes
 * @buf_mutex: protects @peb_buf1 and @peb_buf2
- * @dbg_peb_buf:  buffer of PEB size used for debugging
+ * @dbg_peb_buf: buffer of PEB size used for debugging
 * @dbg_buf_mutex: protects @dbg_peb_buf
  */
 struct ubi_device {
@@ -287,22 +340,24 @@ struct ubi_device {
 	struct device dev;
 	int ubi_num;
 	char ubi_name[sizeof(UBI_NAME_STR)+5];
-	int major;
 	int vol_count;
 	struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
 	spinlock_t volumes_lock;
+	int ref_count;
 
 	int rsvd_pebs;
 	int avail_pebs;
 	int beb_rsvd_pebs;
 	int beb_rsvd_level;
 
+	int autoresize_vol_id;
 	int vtbl_slots;
 	int vtbl_size;
 	struct ubi_vtbl_record *vtbl;
-	struct mutex vtbl_mutex;
+	struct mutex volumes_mutex;
 
 	int max_ec;
+	/* TODO: mean_ec is not updated run-time, fix */
 	int mean_ec;
 
 	/* EBA unit's stuff */
@@ -320,12 +375,13 @@ struct ubi_device {
 		struct rb_root aec;
 	} prot;
 	spinlock_t wl_lock;
+	struct mutex move_mutex;
+	struct rw_semaphore work_sem;
 	int wl_scheduled;
 	struct ubi_wl_entry **lookuptbl;
 	unsigned long long abs_ec;
 	struct ubi_wl_entry *move_from;
 	struct ubi_wl_entry *move_to;
-	int move_from_put;
 	int move_to_put;
 	struct list_head works;
 	int works_count;
@@ -355,15 +411,19 @@ struct ubi_device {
 	void *peb_buf1;
 	void *peb_buf2;
 	struct mutex buf_mutex;
+	struct mutex ckvol_mutex;
 #ifdef CONFIG_MTD_UBI_DEBUG
 	void *dbg_peb_buf;
 	struct mutex dbg_buf_mutex;
 #endif
 };
 
+extern struct kmem_cache *ubi_wl_entry_slab;
+extern struct file_operations ubi_ctrl_cdev_operations;
 extern struct file_operations ubi_cdev_operations;
 extern struct file_operations ubi_vol_cdev_operations;
 extern struct class *ubi_class;
+extern struct mutex ubi_devices_mutex;
 
 /* vtbl.c */
 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -374,13 +434,18 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
 int ubi_remove_volume(struct ubi_volume_desc *desc);
 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
-int ubi_add_volume(struct ubi_device *ubi, int vol_id);
-void ubi_free_volume(struct ubi_device *ubi, int vol_id);
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
 
 /* upd.c */
-int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes);
-int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+		     long long bytes);
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
 			 const void __user *buf, int count);
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+			 const struct ubi_leb_change_req *req);
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+			     const void __user *buf, int count);
 
 /* misc.c */
 int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
@@ -399,16 +464,17 @@ void ubi_gluebi_updated(struct ubi_volume *vol);
 #endif
 
 /* eba.c */
-int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum);
-int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
-		     int offset, int len, int check);
-int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+		      int lnum);
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+		     void *buf, int offset, int len, int check);
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 		      const void *buf, int offset, int len, int dtype);
-int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
-			 const void *buf, int len, int dtype,
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+			 int lnum, const void *buf, int len, int dtype,
 			 int used_ebs);
-int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
-			      const void *buf, int len, int dtype);
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+			      int lnum, const void *buf, int len, int dtype);
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		     struct ubi_vid_hdr *vid_hdr);
 int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
@@ -421,6 +487,7 @@ int ubi_wl_flush(struct ubi_device *ubi);
 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
 void ubi_wl_close(struct ubi_device *ubi);
+int ubi_thread(void *u);
 
 /* io.c */
 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -439,6 +506,14 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
 			 struct ubi_vid_hdr *vid_hdr);
 
+/* build.c */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_detach_mtd_dev(int ubi_num, int anyway);
+struct ubi_device *ubi_get_device(int ubi_num);
+void ubi_put_device(struct ubi_device *ubi);
+struct ubi_device *ubi_get_by_major(int major);
+int ubi_major2num(int major);
+
 /*
  * ubi_rb_for_each_entry - walk an RB-tree.
 * @rb: a pointer to type 'struct rb_node' to use as a loop counter
@@ -523,8 +598,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
  */
 static inline void ubi_ro_mode(struct ubi_device *ubi)
 {
-	ubi->ro_mode = 1;
-	ubi_warn("switch to read-only mode");
+	if (!ubi->ro_mode) {
+		ubi->ro_mode = 1;
+		ubi_warn("switch to read-only mode");
+	}
 }
 
 /**
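
The new build.c interface declared above (ubi_get_device(), ubi_put_device(), ubi_get_by_major(), ubi_major2num()) gives the rest of UBI reference-counted access to device objects, and the reworked ubi_ro_mode() now warns only on the first switch to read-only mode. A minimal sketch of the intended caller pattern for the device references, assuming nothing beyond the prototypes above (the helper name and error handling are illustrative only):

/* Illustrative only: pin a UBI device for the duration of an operation. */
static int example_use_ubi_device(int ubi_num)
{
	struct ubi_device *ubi;

	ubi = ubi_get_device(ubi_num);	/* takes a reference; NULL if no such device */
	if (!ubi)
		return -ENODEV;

	/* ... use @ubi; it cannot be detached while the reference is held ... */

	ubi_put_device(ubi);		/* drop the reference */
	return 0;
}
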
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0efc586a8328..ddaa1a56cc69 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -22,7 +22,8 @@
  */
 
 /*
- * This file contains implementation of the volume update functionality.
+ * This file contains implementation of the volume update and atomic LEB change
+ * functionality.
  *
  * The update operation is based on the per-volume update marker which is
  * stored in the volume table. The update marker is set before the update
@@ -45,29 +46,31 @@
 /**
  * set_update_marker - set update marker.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  *
- * This function sets the update marker flag for volume @vol_id. Returns zero
+ * This function sets the update marker flag for volume @vol. Returns zero
  * in case of success and a negative error code in case of failure.
  */
-static int set_update_marker(struct ubi_device *ubi, int vol_id)
+static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
 {
 	int err;
 	struct ubi_vtbl_record vtbl_rec;
-	struct ubi_volume *vol = ubi->volumes[vol_id];
 
-	dbg_msg("set update marker for volume %d", vol_id);
+	dbg_msg("set update marker for volume %d", vol->vol_id);
 
 	if (vol->upd_marker) {
-		ubi_assert(ubi->vtbl[vol_id].upd_marker);
+		ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
 		dbg_msg("already set");
 		return 0;
 	}
 
-	memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+	memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
+	       sizeof(struct ubi_vtbl_record));
 	vtbl_rec.upd_marker = 1;
 
-	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+	mutex_lock(&ubi->volumes_mutex);
+	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+	mutex_unlock(&ubi->volumes_mutex);
 	vol->upd_marker = 1;
 	return err;
 }
@@ -75,23 +78,24 @@ static int set_update_marker(struct ubi_device *ubi, int vol_id)
 /**
  * clear_update_marker - clear update marker.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @bytes: new data size in bytes
  *
- * This function clears the update marker for volume @vol_id, sets new volume
+ * This function clears the update marker for volume @vol, sets new volume
  * data size and clears the "corrupted" flag (static volumes only). Returns
  * zero in case of success and a negative error code in case of failure.
  */
-static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes)
+static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
+			       long long bytes)
 {
 	int err;
 	uint64_t tmp;
 	struct ubi_vtbl_record vtbl_rec;
-	struct ubi_volume *vol = ubi->volumes[vol_id];
 
-	dbg_msg("clear update marker for volume %d", vol_id);
+	dbg_msg("clear update marker for volume %d", vol->vol_id);
 
-	memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+	memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
+	       sizeof(struct ubi_vtbl_record));
 	ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
 	vtbl_rec.upd_marker = 0;
 
@@ -106,7 +110,9 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
 			vol->last_eb_bytes = vol->usable_leb_size;
 	}
 
-	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+	mutex_lock(&ubi->volumes_mutex);
+	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+	mutex_unlock(&ubi->volumes_mutex);
 	vol->upd_marker = 0;
 	return err;
 }
@@ -114,35 +120,36 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
 /**
  * ubi_start_update - start volume update.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @bytes: update bytes
  *
  * This function starts volume update operation. If @bytes is zero, the volume
  * is just wiped out. Returns zero in case of success and a negative error code
  * in case of failure.
  */
-int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+		     long long bytes)
 {
 	int i, err;
 	uint64_t tmp;
-	struct ubi_volume *vol = ubi->volumes[vol_id];
 
-	dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes);
+	dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes);
+	ubi_assert(!vol->updating && !vol->changing_leb);
 	vol->updating = 1;
 
-	err = set_update_marker(ubi, vol_id);
+	err = set_update_marker(ubi, vol);
 	if (err)
 		return err;
 
 	/* Before updating - wipe out the volume */
 	for (i = 0; i < vol->reserved_pebs; i++) {
-		err = ubi_eba_unmap_leb(ubi, vol_id, i);
+		err = ubi_eba_unmap_leb(ubi, vol, i);
 		if (err)
 			return err;
 	}
 
 	if (bytes == 0) {
-		err = clear_update_marker(ubi, vol_id, 0);
+		err = clear_update_marker(ubi, vol, 0);
 		if (err)
 			return err;
 		err = ubi_wl_flush(ubi);
@@ -163,9 +170,42 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
 }
 
 /**
+ * ubi_start_leb_change - start atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @req: operation request
+ *
+ * This function starts atomic LEB change operation. Returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+			 const struct ubi_leb_change_req *req)
+{
+	ubi_assert(!vol->updating && !vol->changing_leb);
+
+	dbg_msg("start changing LEB %d:%d, %u bytes",
+		vol->vol_id, req->lnum, req->bytes);
+	if (req->bytes == 0)
+		return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
+						 req->dtype);
+
+	vol->upd_bytes = req->bytes;
+	vol->upd_received = 0;
+	vol->changing_leb = 1;
+	vol->ch_lnum = req->lnum;
+	vol->ch_dtype = req->dtype;
+
+	vol->upd_buf = vmalloc(req->bytes);
+	if (!vol->upd_buf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
  * write_leb - write update data.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  * @lnum: logical eraseblock number
  * @buf: data to write
  * @len: data size
@@ -191,26 +231,22 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
-		     int len, int used_ebs)
+static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+		     void *buf, int len, int used_ebs)
 {
-	int err, l;
-	struct ubi_volume *vol = ubi->volumes[vol_id];
+	int err;
 
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
-		l = ALIGN(len, ubi->min_io_size);
-		memset(buf + len, 0xFF, l - len);
+		int l = ALIGN(len, ubi->min_io_size);
+
+		memset(buf + len, 0xFF, l - len);
 
-		l = ubi_calc_data_len(ubi, buf, l);
-		if (l == 0) {
+		len = ubi_calc_data_len(ubi, buf, l);
+		if (len == 0) {
 			dbg_msg("all %d bytes contain 0xFF - skip", len);
 			return 0;
 		}
-		if (len != l)
-			dbg_msg("skip last %d bytes (0xFF)", len - l);
 
-		err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l,
-					UBI_UNKNOWN);
+		err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
 	} else {
 		/*
 		 * When writing static volume, and this is the last logical
@@ -222,7 +258,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
 		 * contain zeros, not random trash.
 		 */
 		memset(buf + len, 0, vol->usable_leb_size - len);
-		err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len,
+		err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
 					   UBI_UNKNOWN, used_ebs);
 	}
 
@@ -236,16 +272,15 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
  * @count: how much bytes to write
  *
  * This function writes more data to the volume which is being updated. It may
- * be called arbitrary number of times until all of the update data arrive.
- * This function returns %0 in case of success, number of bytes written during
- * the last call if the whole volume update was successfully finished, and a
+ * be called an arbitrary number of times until all the update data arrives. This
+ * function returns %0 in case of success, number of bytes written during the
+ * last call if the whole volume update has been successfully finished, and a
  * negative error code in case of failure.
  */
-int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
 			 const void __user *buf, int count)
 {
 	uint64_t tmp;
-	struct ubi_volume *vol = ubi->volumes[vol_id];
 	int lnum, offs, err = 0, len, to_write = count;
 
 	dbg_msg("write %d of %lld bytes, %lld already passed",
@@ -290,8 +325,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
 			 * is the last chunk, it's time to flush the buffer.
 			 */
 			ubi_assert(flush_len <= vol->usable_leb_size);
-			err = write_leb(ubi, vol_id, lnum, vol->upd_buf,
-					flush_len, vol->upd_ebs);
+			err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
+					vol->upd_ebs);
 			if (err)
 				return err;
 		}
@@ -318,8 +353,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
 
 		if (len == vol->usable_leb_size ||
 		    vol->upd_received + len == vol->upd_bytes) {
-			err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len,
-					vol->upd_ebs);
+			err = write_leb(ubi, vol, lnum, vol->upd_buf,
+					len, vol->upd_ebs);
 			if (err)
 				break;
 		}
@@ -333,16 +368,70 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
 	ubi_assert(vol->upd_received <= vol->upd_bytes);
 	if (vol->upd_received == vol->upd_bytes) {
 		/* The update is finished, clear the update marker */
-		err = clear_update_marker(ubi, vol_id, vol->upd_bytes);
+		err = clear_update_marker(ubi, vol, vol->upd_bytes);
 		if (err)
 			return err;
 		err = ubi_wl_flush(ubi);
 		if (err == 0) {
+			vol->updating = 0;
 			err = to_write;
 			vfree(vol->upd_buf);
-			vol->updating = 0;
 		}
 	}
 
 	return err;
 }
+
+/**
+ * ubi_more_leb_change_data - accept more data for atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function accepts more data for the volume which is under the
+ * "atomic LEB change" operation. It may be called an arbitrary number of times
+ * until all data arrives. This function returns %0 in case of success, number
+ * of bytes written during the last call if the whole "atomic LEB change"
+ * operation has been successfully finished, and a negative error code in case
+ * of failure.
+ */
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+			     const void __user *buf, int count)
+{
+	int err;
+
+	dbg_msg("write %d of %lld bytes, %lld already passed",
+		count, vol->upd_bytes, vol->upd_received);
+
+	if (ubi->ro_mode)
+		return -EROFS;
+
+	if (vol->upd_received + count > vol->upd_bytes)
+		count = vol->upd_bytes - vol->upd_received;
+
+	err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
+	if (err)
+		return -EFAULT;
+
+	vol->upd_received += count;
+
+	if (vol->upd_received == vol->upd_bytes) {
+		int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
+
+		memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
+		len = ubi_calc_data_len(ubi, vol->upd_buf, len);
+		err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
+						vol->upd_buf, len, UBI_UNKNOWN);
+		if (err)
+			return err;
+	}
+
+	ubi_assert(vol->upd_received <= vol->upd_bytes);
+	if (vol->upd_received == vol->upd_bytes) {
+		vol->changing_leb = 0;
+		err = count;
+		vfree(vol->upd_buf);
+	}
+
+	return err;
+}
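
Both ubi_more_update_data() and ubi_more_leb_change_data() above share one accumulation scheme: user data is copied into vol->upd_buf until vol->upd_received reaches vol->upd_bytes, and only then is the buffer padded with 0xFF, trimmed with ubi_calc_data_len() and written out. A condensed sketch of how a caller might drive the chunked interface; the real caller is the volume character-device write path, and the helper name and 4 KiB chunk size below are purely illustrative:

/* Illustrative chunked-update driver; not part of the patch. */
static int feed_whole_update(struct ubi_device *ubi, struct ubi_volume *vol,
			     const void __user *buf, long long total)
{
	long long done = 0;
	int err;

	err = ubi_start_update(ubi, vol, total);
	if (err)
		return err;

	while (done < total) {
		int chunk = min_t(long long, total - done, 4096);

		err = ubi_more_update_data(ubi, vol, buf + done, chunk);
		if (err < 0)
			return err;	/* failure */
		if (err > 0)
			return 0;	/* last chunk written, update finished */
		done += chunk;
	}
	return 0;
}
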
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 88629a320c2b..a3ca2257e601 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -63,21 +63,30 @@ static struct device_attribute attr_vol_upd_marker =
  * B. process 2 removes volume Y;
  * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
  *
- * What we want to do in a situation like that is to return error when the file
- * is read. This is done by means of the 'removed' flag and the 'vol_lock' of
- * the UBI volume description object.
+ * In this situation, this function will return %-ENODEV because it will find
+ * out that the volume was removed from the @ubi->volumes array.
  */
 static ssize_t vol_attribute_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
 	int ret;
 	struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+	struct ubi_device *ubi;
 
-	spin_lock(&vol->ubi->volumes_lock);
-	if (vol->removed) {
-		spin_unlock(&vol->ubi->volumes_lock);
+	ubi = ubi_get_device(vol->ubi->ubi_num);
+	if (!ubi)
+		return -ENODEV;
+
+	spin_lock(&ubi->volumes_lock);
+	if (!ubi->volumes[vol->vol_id]) {
+		spin_unlock(&ubi->volumes_lock);
+		ubi_put_device(ubi);
 		return -ENODEV;
 	}
+	/* Take a reference to prevent volume removal */
+	vol->ref_count += 1;
+	spin_unlock(&ubi->volumes_lock);
+
 	if (attr == &attr_vol_reserved_ebs)
 		ret = sprintf(buf, "%d\n", vol->reserved_pebs);
 	else if (attr == &attr_vol_type) {
@@ -94,15 +103,22 @@ static ssize_t vol_attribute_show(struct device *dev,
 		ret = sprintf(buf, "%d\n", vol->corrupted);
 	else if (attr == &attr_vol_alignment)
 		ret = sprintf(buf, "%d\n", vol->alignment);
-	else if (attr == &attr_vol_usable_eb_size) {
+	else if (attr == &attr_vol_usable_eb_size)
 		ret = sprintf(buf, "%d\n", vol->usable_leb_size);
-	} else if (attr == &attr_vol_data_bytes)
+	else if (attr == &attr_vol_data_bytes)
 		ret = sprintf(buf, "%lld\n", vol->used_bytes);
 	else if (attr == &attr_vol_upd_marker)
 		ret = sprintf(buf, "%d\n", vol->upd_marker);
 	else
-		BUG();
-	spin_unlock(&vol->ubi->volumes_lock);
+		/* This must be a bug */
+		ret = -EINVAL;
+
+	/* We've done the operation, drop volume and UBI device references */
+	spin_lock(&ubi->volumes_lock);
+	vol->ref_count -= 1;
+	ubi_assert(vol->ref_count >= 0);
+	spin_unlock(&ubi->volumes_lock);
+	ubi_put_device(ubi);
 	return ret;
 }
 
@@ -110,7 +126,7 @@ static ssize_t vol_attribute_show(struct device *dev,
 static void vol_release(struct device *dev)
 {
 	struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
-	ubi_assert(vol->removed);
+
 	kfree(vol);
 }
 
@@ -152,9 +168,7 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
 	if (err)
 		return err;
 	err = device_create_file(&vol->dev, &attr_vol_upd_marker);
-	if (err)
-		return err;
-	return 0;
+	return err;
 }
 
 /**
@@ -180,16 +194,18 @@ static void volume_sysfs_close(struct ubi_volume *vol)
  * @req: volume creation request
  *
  * This function creates volume described by @req. If @req->vol_id id
- * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume
+ * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
  * and saves it in @req->vol_id. Returns zero in case of success and a negative
- * error code in case of failure.
+ * error code in case of failure. Note, the caller has to have the
+ * @ubi->volumes_mutex locked.
  */
 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 {
-	int i, err, vol_id = req->vol_id;
+	int i, err, vol_id = req->vol_id, dont_free = 0;
 	struct ubi_volume *vol;
 	struct ubi_vtbl_record vtbl_rec;
 	uint64_t bytes;
+	dev_t dev;
 
 	if (ubi->ro_mode)
 		return -EROFS;
@@ -199,7 +215,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 		return -ENOMEM;
 
 	spin_lock(&ubi->volumes_lock);
-
 	if (vol_id == UBI_VOL_NUM_AUTO) {
 		/* Find unused volume ID */
 		dbg_msg("search for vacant volume ID");
@@ -252,6 +267,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 	}
 	ubi->avail_pebs -= vol->reserved_pebs;
 	ubi->rsvd_pebs += vol->reserved_pebs;
+	spin_unlock(&ubi->volumes_lock);
 
 	vol->vol_id    = vol_id;
 	vol->alignment = req->alignment;
@@ -259,10 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 	vol->vol_type  = req->vol_type;
 	vol->name_len  = req->name_len;
 	memcpy(vol->name, req->name, vol->name_len + 1);
-	vol->exclusive = 1;
 	vol->ubi = ubi;
-	ubi->volumes[vol_id] = vol;
-	spin_unlock(&ubi->volumes_lock);
 
 	/*
 	 * Finish all pending erases because there may be some LEBs belonging
@@ -299,9 +312,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 	/* Register character device for the volume */
 	cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
 	vol->cdev.owner = THIS_MODULE;
-	err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1);
+	dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
+	err = cdev_add(&vol->cdev, dev, 1);
 	if (err) {
-		ubi_err("cannot add character device for volume %d", vol_id);
+		ubi_err("cannot add character device");
 		goto out_mapping;
 	}
 
@@ -311,12 +325,15 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 
 	vol->dev.release = vol_release;
 	vol->dev.parent = &ubi->dev;
-	vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
+	vol->dev.devt = dev;
 	vol->dev.class = ubi_class;
+
 	sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
 	err = device_register(&vol->dev);
-	if (err)
+	if (err) {
+		ubi_err("cannot register device");
 		goto out_gluebi;
+	}
 
 	err = volume_sysfs_init(ubi, vol);
 	if (err)
@@ -339,15 +356,27 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
 		goto out_sysfs;
 
 	spin_lock(&ubi->volumes_lock);
+	ubi->volumes[vol_id] = vol;
 	ubi->vol_count += 1;
-	vol->exclusive = 0;
 	spin_unlock(&ubi->volumes_lock);
 
 	paranoid_check_volumes(ubi);
 	return 0;
 
+out_sysfs:
+	/*
+	 * We have registered our device, so we should not free the volume
+	 * description object in this function in case of an error - it is
+	 * freed by the release function.
+	 *
+	 * Get device reference to prevent the release function from being
+	 * called just after sysfs has been closed.
+	 */
+	dont_free = 1;
+	get_device(&vol->dev);
+	volume_sysfs_close(vol);
 out_gluebi:
-	err = ubi_destroy_gluebi(vol);
+	ubi_destroy_gluebi(vol);
 out_cdev:
 	cdev_del(&vol->cdev);
 out_mapping:
@@ -356,26 +385,13 @@ out_acc:
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= vol->reserved_pebs;
 	ubi->avail_pebs += vol->reserved_pebs;
-	ubi->volumes[vol_id] = NULL;
 out_unlock:
 	spin_unlock(&ubi->volumes_lock);
-	kfree(vol);
-	return err;
-
-	/*
-	 * We are registered, so @vol is destroyed in the release function and
-	 * we have to de-initialize differently.
-	 */
-out_sysfs:
-	err = ubi_destroy_gluebi(vol);
-	cdev_del(&vol->cdev);
-	kfree(vol->eba_tbl);
-	spin_lock(&ubi->volumes_lock);
-	ubi->rsvd_pebs -= vol->reserved_pebs;
-	ubi->avail_pebs += vol->reserved_pebs;
-	ubi->volumes[vol_id] = NULL;
-	spin_unlock(&ubi->volumes_lock);
-	volume_sysfs_close(vol);
+	if (dont_free)
+		put_device(&vol->dev);
+	else
+		kfree(vol);
+	ubi_err("cannot create volume %d, error %d", vol_id, err);
 	return err;
 }
 
@@ -385,7 +401,8 @@ out_sysfs:
  *
  * This function removes volume described by @desc. The volume has to be opened
  * in "exclusive" mode. Returns zero in case of success and a negative error
- * code in case of failure.
+ * code in case of failure. The caller has to have the @ubi->volumes_mutex
+ * locked.
  */
 int ubi_remove_volume(struct ubi_volume_desc *desc)
 {
@@ -400,30 +417,36 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
 	if (ubi->ro_mode)
 		return -EROFS;
 
+	spin_lock(&ubi->volumes_lock);
+	if (vol->ref_count > 1) {
+		/*
+		 * The volume is busy, probably someone is reading one of its
+		 * sysfs files.
+		 */
+		err = -EBUSY;
+		goto out_unlock;
+	}
+	ubi->volumes[vol_id] = NULL;
+	spin_unlock(&ubi->volumes_lock);
+
 	err = ubi_destroy_gluebi(vol);
 	if (err)
-		return err;
+		goto out_err;
 
 	err = ubi_change_vtbl_record(ubi, vol_id, NULL);
 	if (err)
-		return err;
+		goto out_err;
 
 	for (i = 0; i < vol->reserved_pebs; i++) {
-		err = ubi_eba_unmap_leb(ubi, vol_id, i);
+		err = ubi_eba_unmap_leb(ubi, vol, i);
 		if (err)
-			return err;
+			goto out_err;
 	}
 
-	spin_lock(&ubi->volumes_lock);
-	vol->removed = 1;
-	ubi->volumes[vol_id] = NULL;
-	spin_unlock(&ubi->volumes_lock);
-
 	kfree(vol->eba_tbl);
 	vol->eba_tbl = NULL;
 	cdev_del(&vol->cdev);
 	volume_sysfs_close(vol);
-	kfree(desc);
 
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= reserved_pebs;
@@ -441,8 +464,15 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
 	spin_unlock(&ubi->volumes_lock);
 
 	paranoid_check_volumes(ubi);
-	module_put(THIS_MODULE);
 	return 0;
+
+out_err:
+	ubi_err("cannot remove volume %d, error %d", vol_id, err);
+	spin_lock(&ubi->volumes_lock);
+	ubi->volumes[vol_id] = vol;
+out_unlock:
+	spin_unlock(&ubi->volumes_lock);
+	return err;
 }
 
 /**
@@ -450,8 +480,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
  * @desc: volume descriptor
  * @reserved_pebs: new size in physical eraseblocks
  *
- * This function returns zero in case of success, and a negative error code in
- * case of failure.
+ * This function re-sizes the volume and returns zero in case of success, and a
+ * negative error code in case of failure. The caller has to have the
+ * @ubi->volumes_mutex locked.
  */
 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 {
@@ -466,8 +497,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 
 	dbg_msg("re-size volume %d to from %d to %d PEBs",
 		vol_id, vol->reserved_pebs, reserved_pebs);
-	ubi_assert(desc->mode == UBI_EXCLUSIVE);
-	ubi_assert(vol == ubi->volumes[vol_id]);
 
 	if (vol->vol_type == UBI_STATIC_VOLUME &&
 	    reserved_pebs < vol->used_ebs) {
@@ -487,6 +516,14 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 	for (i = 0; i < reserved_pebs; i++)
 		new_mapping[i] = UBI_LEB_UNMAPPED;
 
+	spin_lock(&ubi->volumes_lock);
+	if (vol->ref_count > 1) {
+		spin_unlock(&ubi->volumes_lock);
+		err = -EBUSY;
+		goto out_free;
+	}
+	spin_unlock(&ubi->volumes_lock);
+
 	/* Reserve physical eraseblocks */
 	pebs = reserved_pebs - vol->reserved_pebs;
 	if (pebs > 0) {
@@ -516,7 +553,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 
 	if (pebs < 0) {
 		for (i = 0; i < -pebs; i++) {
-			err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i);
+			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
 			if (err)
 				goto out_acc;
 		}
@@ -565,27 +602,28 @@ out_free:
 /**
  * ubi_add_volume - add volume.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  *
- * This function adds an existin volume and initializes all its data
- * structures. Returnes zero in case of success and a negative error code in
+ * This function adds an existing volume and initializes all its data
+ * structures. Returns zero in case of success and a negative error code in
  * case of failure.
  */
-int ubi_add_volume(struct ubi_device *ubi, int vol_id)
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
 {
-	int err;
-	struct ubi_volume *vol = ubi->volumes[vol_id];
+	int err, vol_id = vol->vol_id;
+	dev_t dev;
 
 	dbg_msg("add volume %d", vol_id);
 	ubi_dbg_dump_vol_info(vol);
-	ubi_assert(vol);
 
 	/* Register character device for the volume */
 	cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
 	vol->cdev.owner = THIS_MODULE;
-	err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1);
+	dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
+	err = cdev_add(&vol->cdev, dev, 1);
 	if (err) {
-		ubi_err("cannot add character device for volume %d", vol_id);
+		ubi_err("cannot add character device for volume %d, error %d",
+			vol_id, err);
 		return err;
 	}
 
@@ -595,7 +633,7 @@ int ubi_add_volume(struct ubi_device *ubi, int vol_id)
 
 	vol->dev.release = vol_release;
 	vol->dev.parent = &ubi->dev;
-	vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
+	vol->dev.devt = dev;
 	vol->dev.class = ubi_class;
 	sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
 	err = device_register(&vol->dev);
@@ -623,22 +661,19 @@ out_cdev:
 /**
  * ubi_free_volume - free volume.
  * @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
  *
- * This function frees all resources for volume @vol_id but does not remove it.
+ * This function frees all resources for volume @vol but does not remove it.
  * Used only when the UBI device is detached.
  */
-void ubi_free_volume(struct ubi_device *ubi, int vol_id)
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
 {
 	int err;
-	struct ubi_volume *vol = ubi->volumes[vol_id];
 
-	dbg_msg("free volume %d", vol_id);
-	ubi_assert(vol);
+	dbg_msg("free volume %d", vol->vol_id);
 
-	vol->removed = 1;
+	ubi->volumes[vol->vol_id] = NULL;
 	err = ubi_destroy_gluebi(vol);
-	ubi->volumes[vol_id] = NULL;
 	cdev_del(&vol->cdev);
 	volume_sysfs_close(vol);
 }
@@ -708,11 +743,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
 		goto fail;
 	}
 
-	if (vol->upd_marker != 0 && vol->upd_marker != 1) {
-		ubi_err("bad upd_marker");
-		goto fail;
-	}
-
 	if (vol->upd_marker && vol->corrupted) {
 		dbg_err("update marker and corrupted simultaneously");
 		goto fail;
@@ -747,7 +777,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
 
 	n = (long long)vol->used_ebs * vol->usable_leb_size;
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
-		if (vol->corrupted != 0) {
+		if (vol->corrupted) {
 			ubi_err("corrupted dynamic volume");
 			goto fail;
 		}
@@ -764,10 +794,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
 			goto fail;
 		}
 	} else {
-		if (vol->corrupted != 0 && vol->corrupted != 1) {
-			ubi_err("bad corrupted");
-			goto fail;
-		}
 		if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
 			ubi_err("bad used_ebs");
 			goto fail;
@@ -820,9 +846,7 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
 {
 	int i;
 
-	mutex_lock(&ubi->vtbl_mutex);
 	for (i = 0; i < ubi->vtbl_slots; i++)
 		paranoid_check_volume(ubi, i);
-	mutex_unlock(&ubi->vtbl_mutex);
 }
 #endif
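
The vmt.c changes above replace the old 'removed' flag with two cooperating checks: vol_attribute_show() pins the UBI device with ubi_get_device() and bumps vol->ref_count under volumes_lock before touching the volume, while ubi_remove_volume() and ubi_resize_volume() bail out with -EBUSY when ref_count is greater than one (each volume apparently holds one base reference of its own; the layout volume's is set explicitly in vtbl.c below). The two sides, condensed from the hunks above:

/* reader side, condensed from vol_attribute_show() */
spin_lock(&ubi->volumes_lock);
vol->ref_count += 1;			/* removal now sees ref_count > 1 */
spin_unlock(&ubi->volumes_lock);
/* ... format the requested attribute ... */
spin_lock(&ubi->volumes_lock);
vol->ref_count -= 1;
spin_unlock(&ubi->volumes_lock);

/* remover side, condensed from ubi_remove_volume() */
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {		/* someone is reading a sysfs file */
	err = -EBUSY;
	goto out_unlock;
}
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
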
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 25b3bd61c7ec..56fc3fbce838 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -86,8 +86,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
 {
 	int i, err;
 	uint32_t crc;
+	struct ubi_volume *layout_vol;
 
 	ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
+	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
 
 	if (!vtbl_rec)
 		vtbl_rec = &empty_vtbl_record;
@@ -96,31 +98,25 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
 		vtbl_rec->crc = cpu_to_be32(crc);
 	}
 
-	mutex_lock(&ubi->vtbl_mutex);
 	memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
 	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
-		err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i);
-		if (err) {
-			mutex_unlock(&ubi->vtbl_mutex);
+		err = ubi_eba_unmap_leb(ubi, layout_vol, i);
+		if (err)
 			return err;
-		}
-		err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0,
+
+		err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
 					ubi->vtbl_size, UBI_LONGTERM);
-		if (err) {
-			mutex_unlock(&ubi->vtbl_mutex);
+		if (err)
 			return err;
-		}
 	}
 
 	paranoid_vtbl_check(ubi);
-	mutex_unlock(&ubi->vtbl_mutex);
-	return ubi_wl_flush(ubi);
+	return 0;
 }
 
 /**
- * vol_til_check - check if volume table is not corrupted and contains sensible
- * data.
- *
+ * vtbl_check - check if volume table is not corrupted and contains sensible
+ *              data.
  * @ubi: UBI device description object
  * @vtbl: volume table
  *
@@ -273,7 +269,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
 	 * this volume table copy was found during scanning. It has to be wiped
 	 * out.
 	 */
-	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
+	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
 	if (sv)
 		old_seb = ubi_scan_find_seb(sv, copy);
 
@@ -285,7 +281,7 @@ retry:
 	}
 
 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
-	vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID);
+	vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
 	vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
 	vid_hdr->data_size = vid_hdr->used_ebs =
 			     vid_hdr->data_pad = cpu_to_be32(0);
@@ -518,6 +514,17 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
 		vol->name[vol->name_len] = '\0';
 		vol->vol_id = i;
 
+		if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
+			/* Auto re-size flag may be set only for one volume */
+			if (ubi->autoresize_vol_id != -1) {
+				ubi_err("more than one auto-resize volume (%d "
+					"and %d)", ubi->autoresize_vol_id, i);
+				return -EINVAL;
+			}
+
+			ubi->autoresize_vol_id = i;
+		}
+
 		ubi_assert(!ubi->volumes[i]);
 		ubi->volumes[i] = vol;
 		ubi->vol_count += 1;
@@ -568,6 +575,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
 		vol->last_eb_bytes = sv->last_data_size;
 	}
 
+	/* And add the layout volume */
 	vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
 	if (!vol)
 		return -ENOMEM;
@@ -582,7 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
 	vol->last_eb_bytes = vol->reserved_pebs;
 	vol->used_bytes =
 		(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
-	vol->vol_id = UBI_LAYOUT_VOL_ID;
+	vol->vol_id = UBI_LAYOUT_VOLUME_ID;
+	vol->ref_count = 1;
 
 	ubi_assert(!ubi->volumes[i]);
 	ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
@@ -734,7 +743,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
 	ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
 	ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
 
-	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
+	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
 	if (!sv) {
 		/*
 		 * No logical eraseblocks belonging to the layout volume were
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8cc72b5..a471a491f0ab 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
 #define WL_MAX_FAILURES 32
 
 /**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
-	struct rb_node rb;
-	int ec;
-	int pnum;
-};
-
-/**
  * struct ubi_wl_prot_entry - PEB protection entry.
  * @rb_pnum: link in the @wl->prot.pnum RB-tree
  * @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 #define paranoid_check_in_wl_tree(e, root)
 #endif
 
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
 	int err;
 	struct ubi_work *wrk;
 
-	spin_lock(&ubi->wl_lock);
+	cond_resched();
 
+	/*
+	 * @ubi->work_sem is used to synchronize with the workers. Workers take
+	 * it in read mode, so many of them may be doing work at a time. But
+	 * the queue flush code has to be sure the whole queue of works is
+	 * done, and it takes the semaphore in write mode.
+	 */
+	down_read(&ubi->work_sem);
+	spin_lock(&ubi->wl_lock);
 	if (list_empty(&ubi->works)) {
 		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->work_sem);
 		return 0;
 	}
 
 	wrk = list_entry(ubi->works.next, struct ubi_work, list);
 	list_del(&wrk->list);
+	ubi->works_count -= 1;
+	ubi_assert(ubi->works_count >= 0);
 	spin_unlock(&ubi->wl_lock);
 
 	/*
@@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
 	err = wrk->func(ubi, wrk, 0);
 	if (err)
 		ubi_err("work failed with error code %d", err);
+	up_read(&ubi->work_sem);
 
-	spin_lock(&ubi->wl_lock);
-	ubi->works_count -= 1;
-	ubi_assert(ubi->works_count >= 0);
-	spin_unlock(&ubi->wl_lock);
 	return err;
 }
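
The @ubi->work_sem comment above describes the new flush barrier: every worker brackets the work item it runs with down_read()/up_read(), so any number of them may execute concurrently, while ubi_wl_flush() (reworked further below in this patch) takes the semaphore in write mode once, which cannot succeed until every in-flight worker has finished. Condensed from the do_work() and ubi_wl_flush() hunks:

/* worker side, as in do_work() above */
down_read(&ubi->work_sem);
/* ... dequeue one work item under wl_lock and run wrk->func() ... */
up_read(&ubi->work_sem);

/* flush side, as in ubi_wl_flush() below: an empty write lock is the barrier */
down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
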
 
@@ -549,8 +539,12 @@ retry:
  * prot_tree_del - remove a physical eraseblock from the protection trees
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to remove
+ *
+ * This function removes PEB @pnum from the protection trees and returns zero
+ * in case of success and %-ENODEV if the PEB was not found in the protection
+ * trees.
  */
-static void prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_tree_del(struct ubi_device *ubi, int pnum)
 {
 	struct rb_node *p;
 	struct ubi_wl_prot_entry *pe = NULL;
@@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
 		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
 
 		if (pnum == pe->e->pnum)
-			break;
+			goto found;
 
 		if (pnum < pe->e->pnum)
 			p = p->rb_left;
@@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
 			p = p->rb_right;
 	}
 
+	return -ENODEV;
+
+found:
 	ubi_assert(pe->e->pnum == pnum);
 	rb_erase(&pe->rb_aec, &ubi->prot.aec);
 	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
 	kfree(pe);
+	return 0;
 }
 
 /**
@@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, put = 0;
+	int err, put = 0, scrubbing = 0, protect = 0;
+	struct ubi_wl_prot_entry *uninitialized_var(pe);
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
@@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	if (!vid_hdr)
 		return -ENOMEM;
 
+	mutex_lock(&ubi->move_mutex);
 	spin_lock(&ubi->wl_lock);
+	ubi_assert(!ubi->move_from && !ubi->move_to);
+	ubi_assert(!ubi->move_to_put);
 
-	/*
-	 * Only one WL worker at a time is supported at this implementation, so
-	 * make sure a PEB is not being moved already.
-	 */
-	if (ubi->move_to || !ubi->free.rb_node ||
+	if (!ubi->free.rb_node ||
 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
 		/*
-		 * Only one WL worker at a time is supported at this
-		 * implementation, so if a LEB is already being moved, cancel.
-		 *
-		 * No free physical eraseblocks? Well, we cancel wear-leveling
-		 * then. It will be triggered again when a free physical
-		 * eraseblock appears.
+		 * No free physical eraseblocks? Well, they must be waiting in
+		 * the queue to be erased. Cancel movement - it will be
+		 * triggered again when a free physical eraseblock appears.
 		 *
 		 * No used physical eraseblocks? They must be temporarily
 		 * protected from being moved. They will be moved to the
@@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 */
 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
 		       !ubi->free.rb_node, !ubi->used.rb_node);
-		ubi->wl_scheduled = 0;
-		spin_unlock(&ubi->wl_lock);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return 0;
+		goto out_cancel;
 	}
 
 	if (!ubi->scrub.rb_node) {
@@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
 			       e1->ec, e2->ec);
-			ubi->wl_scheduled = 0;
-			spin_unlock(&ubi->wl_lock);
-			ubi_free_vid_hdr(ubi, vid_hdr);
-			return 0;
+			goto out_cancel;
 		}
 		paranoid_check_in_wl_tree(e1, &ubi->used);
 		rb_erase(&e1->rb, &ubi->used);
 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
 	} else {
+		/* Perform scrubbing */
+		scrubbing = 1;
 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
 		paranoid_check_in_wl_tree(e1, &ubi->scrub);
-	rb_erase(&e1->rb, &ubi->scrub);
+		rb_erase(&e1->rb, &ubi->scrub);
 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
 	}
 
 	paranoid_check_in_wl_tree(e2, &ubi->free);
 	rb_erase(&e2->rb, &ubi->free);
-	ubi_assert(!ubi->move_from && !ubi->move_to);
-	ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
 	ubi->move_from = e1;
 	ubi->move_to = e2;
 	spin_unlock(&ubi->wl_lock);
@@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	 * We so far do not know which logical eraseblock our physical
 	 * eraseblock (@e1) belongs to. We have to read the volume identifier
 	 * header first.
+	 *
+	 * Note, we are protected from this PEB being unmapped and erased. The
+	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+	 * which is being moved was unmapped.
 	 */
 
 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			 * likely have the VID header in place.
 			 */
 			dbg_wl("PEB %d has no VID header", e1->pnum);
-			err = 0;
-		} else {
-			ubi_err("error %d while reading VID header from PEB %d",
-				err, e1->pnum);
-			if (err > 0)
-				err = -EIO;
+			goto out_not_moved;
 		}
-		goto error;
+
+		ubi_err("error %d while reading VID header from PEB %d",
+			err, e1->pnum);
+		if (err > 0)
+			err = -EIO;
+		goto out_error;
 	}
 
 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
 	if (err) {
-		if (err == UBI_IO_BITFLIPS)
-			err = 0;
-		goto error;
+
+		if (err < 0)
+			goto out_error;
+		if (err == 1)
+			goto out_not_moved;
+
+		/*
+		 * For some reason the LEB was not moved - it might be because
+		 * the volume is being deleted. We should prevent this PEB from
+		 * being selected for wear-leveling movement for some "time",
+		 * so put it into the protection tree.
+		 */
+
+		dbg_wl("cancelled moving PEB %d", e1->pnum);
+		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+		if (!pe) {
+			err = -ENOMEM;
+			goto out_error;
+		}
+
+		protect = 1;
 	}
 
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
+	if (protect)
+		prot_tree_add(ubi, e1, pe, protect);
 	if (!ubi->move_to_put)
 		wl_tree_add(e2, &ubi->used);
 	else
 		put = 1;
 	ubi->move_from = ubi->move_to = NULL;
-	ubi->move_from_put = ubi->move_to_put = 0;
-	ubi->wl_scheduled = 0;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
 	if (put) {
@@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 */
 		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
 		err = schedule_erase(ubi, e2, 0);
-		if (err) {
-			kmem_cache_free(wl_entries_slab, e2);
-			ubi_ro_mode(ubi);
-		}
+		if (err)
+			goto out_error;
 	}
 
-	err = schedule_erase(ubi, e1, 0);
-	if (err) {
-		kmem_cache_free(wl_entries_slab, e1);
-		ubi_ro_mode(ubi);
+	if (!protect) {
+		err = schedule_erase(ubi, e1, 0);
+		if (err)
+			goto out_error;
 	}
 
+
 	dbg_wl("done");
-	return err;
+	mutex_unlock(&ubi->move_mutex);
+	return 0;
 
 	/*
-	 * Some error occurred. @e1 was not changed, so return it back. @e2
-	 * might be changed, schedule it for erasure.
+	 * For some reason the LEB was not moved; it might be an error or
+	 * something else. @e1 was not changed, so return it back. @e2 might
+	 * have been changed, so schedule it for erasure.
 	 */
-error:
-	if (err)
-		dbg_wl("error %d occurred, cancel operation", err);
-	ubi_assert(err <= 0);
-
+out_not_moved:
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
-	ubi->wl_scheduled = 0;
-	if (ubi->move_from_put)
-		put = 1;
+	if (scrubbing)
+		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
 	ubi->move_from = ubi->move_to = NULL;
-	ubi->move_from_put = ubi->move_to_put = 0;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	if (put) {
-		/*
-		 * Well, the target PEB was put meanwhile, schedule it for
-		 * erasure.
-		 */
-		dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
-		err = schedule_erase(ubi, e1, 0);
-		if (err) {
-			kmem_cache_free(wl_entries_slab, e1);
-			ubi_ro_mode(ubi);
-		}
-	}
-
 	err = schedule_erase(ubi, e2, 0);
-	if (err) {
-		kmem_cache_free(wl_entries_slab, e2);
-		ubi_ro_mode(ubi);
-	}
+	if (err)
+		goto out_error;
+
+	mutex_unlock(&ubi->move_mutex);
+	return 0;
+
+out_error:
+	ubi_err("error %d while moving PEB %d to PEB %d",
+		err, e1->pnum, e2->pnum);
 
-	yield();
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	spin_lock(&ubi->wl_lock);
+	ubi->move_from = ubi->move_to = NULL;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+
+	kmem_cache_free(ubi_wl_entry_slab, e1);
+	kmem_cache_free(ubi_wl_entry_slab, e2);
+	ubi_ro_mode(ubi);
+
+	mutex_unlock(&ubi->move_mutex);
 	return err;
+
+out_cancel:
+	ubi->wl_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+	mutex_unlock(&ubi->move_mutex);
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	return 0;
 }
 
 /**
@@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
 		kfree(wl_wrk);
-		kmem_cache_free(wl_entries_slab, e);
+		kmem_cache_free(ubi_wl_entry_slab, e);
 		return 0;
 	}
 
@@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
-	kmem_cache_free(wl_entries_slab, e);
+	kmem_cache_free(ubi_wl_entry_slab, e);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
@@ -1119,8 +1136,7 @@ out_ro:
 }
 
 /**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
- * unit.
+ * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock to return
  * @torture: if this physical eraseblock has to be tortured
@@ -1128,7 +1144,7 @@ out_ro:
  * This function is called to return physical eraseblock @pnum to the pool of
  * free physical eraseblocks. The @torture flag has to be set if an I/O error
  * occurred to this @pnum and it has to be tested. This function returns zero
- * in case of success and a negative error code in case of failure.
+ * in case of success, and a negative error code in case of failure.
  */
 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 {
@@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 	ubi_assert(pnum >= 0);
 	ubi_assert(pnum < ubi->peb_count);
 
+retry:
 	spin_lock(&ubi->wl_lock);
-
 	e = ubi->lookuptbl[pnum];
 	if (e == ubi->move_from) {
 		/*
@@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 		 * be moved. It will be scheduled for erasure in the
 		 * wear-leveling worker.
 		 */
-		dbg_wl("PEB %d is being moved", pnum);
-		ubi_assert(!ubi->move_from_put);
-		ubi->move_from_put = 1;
+		dbg_wl("PEB %d is being moved, wait", pnum);
 		spin_unlock(&ubi->wl_lock);
-		return 0;
+
+		/* Wait for the WL worker by taking the @ubi->move_mutex */
+		mutex_lock(&ubi->move_mutex);
+		mutex_unlock(&ubi->move_mutex);
+		goto retry;
 	} else if (e == ubi->move_to) {
 		/*
 		 * User is putting the physical eraseblock which was selected
 		 * as the target the data is moved to. It may happen if the EBA
-		 * unit already re-mapped the LEB but the WL unit did has not
-		 * put the PEB to the "used" tree.
+		 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
+		 * the WL unit has not yet put the PEB to the "used" tree and
+		 * is about to do this. So we just set a flag which will
+		 * tell the WL worker that the PEB is not needed anymore and
+		 * should be scheduled for erasure.
 		 */
 		dbg_wl("PEB %d is the target of data moving", pnum);
 		ubi_assert(!ubi->move_to_put);
@@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 		} else if (in_wl_tree(e, &ubi->scrub)) {
 			paranoid_check_in_wl_tree(e, &ubi->scrub);
 			rb_erase(&e->rb, &ubi->scrub);
-		} else
-			prot_tree_del(ubi, e->pnum);
+		} else {
+			err = prot_tree_del(ubi, e->pnum);
+			if (err) {
+				ubi_err("PEB %d not found", pnum);
+				ubi_ro_mode(ubi);
+				spin_unlock(&ubi->wl_lock);
+				return err;
+			}
+		}
 	}
 	spin_unlock(&ubi->wl_lock);
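
The retry path above replaces the old move_from_put flag: the wear-leveling worker now holds @ubi->move_mutex for the whole duration of a move, so a caller that finds its PEB equal to ubi->move_from drops wl_lock, takes and immediately releases move_mutex (which sleeps until the move completes) and retries the lookup. Condensed from the hunk above:

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		spin_unlock(&ubi->wl_lock);
		mutex_lock(&ubi->move_mutex);	/* blocks until the move ends */
		mutex_unlock(&ubi->move_mutex);
		goto retry;			/* re-check under wl_lock */
	}
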
 
@@ -1227,8 +1255,17 @@ retry:
 	if (in_wl_tree(e, &ubi->used)) {
 		paranoid_check_in_wl_tree(e, &ubi->used);
 		rb_erase(&e->rb, &ubi->used);
-	} else
-		prot_tree_del(ubi, pnum);
+	} else {
+		int err;
+
+		err = prot_tree_del(ubi, e->pnum);
+		if (err) {
+			ubi_err("PEB %d not found", pnum);
+			ubi_ro_mode(ubi);
+			spin_unlock(&ubi->wl_lock);
+			return err;
+		}
+	}
 
 	wl_tree_add(e, &ubi->scrub);
 	spin_unlock(&ubi->wl_lock);
@@ -1249,17 +1286,32 @@ retry:
  */
 int ubi_wl_flush(struct ubi_device *ubi)
 {
-	int err, pending_count;
-
-	pending_count = ubi->works_count;
-
-	dbg_wl("flush (%d pending works)", pending_count);
+	int err;
 
 	/*
 	 * Erase while the pending works queue is not empty, but not more then
 	 * the number of currently pending works.
 	 */
-	while (pending_count-- > 0) {
+	dbg_wl("flush (%d pending works)", ubi->works_count);
+	while (ubi->works_count) {
+		err = do_work(ubi);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * Make sure all the works which are being executed in parallel are
+	 * finished.
+	 */
+	down_write(&ubi->work_sem);
+	up_write(&ubi->work_sem);
+
+	/*
+	 * And in case the last work was the WL worker and it cancelled the LEB
+	 * movement, flush again.
+	 */
+	while (ubi->works_count) {
+		dbg_wl("flush more (%d pending works)", ubi->works_count);
 		err = do_work(ubi);
 		if (err)
 			return err;
@@ -1294,7 +1346,7 @@ static void tree_destroy(struct rb_root *root)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 		}
 	}
 }
@@ -1303,7 +1355,7 @@ static void tree_destroy(struct rb_root *root)
  * ubi_thread - UBI background thread.
  * @u: the UBI device description object pointer
  */
-static int ubi_thread(void *u)
+int ubi_thread(void *u)
 {
 	int failures = 0;
 	struct ubi_device *ubi = u;
@@ -1394,36 +1446,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
 	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
 	spin_lock_init(&ubi->wl_lock);
+	mutex_init(&ubi->move_mutex);
+	init_rwsem(&ubi->work_sem);
 	ubi->max_ec = si->max_ec;
 	INIT_LIST_HEAD(&ubi->works);
 
 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
 
-	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
-	if (IS_ERR(ubi->bgt_thread)) {
-		err = PTR_ERR(ubi->bgt_thread);
-		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
-			err);
-		return err;
-	}
-
-	if (ubi_devices_cnt == 0) {
-		wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
-						    sizeof(struct ubi_wl_entry),
-						    0, 0, NULL);
-		if (!wl_entries_slab)
-			return -ENOMEM;
-	}
-
 	err = -ENOMEM;
 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
 	if (!ubi->lookuptbl)
-		goto out_free;
+		return err;
 
 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1431,7 +1469,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1439,7 +1477,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->free, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1453,7 +1491,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->corr, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1461,7 +1499,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1470,7 +1508,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
 			cond_resched();
 
-			e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 			if (!e)
 				goto out_free;
 
@@ -1510,8 +1548,6 @@ out_free:
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(wl_entries_slab);
 	return err;
 }
 
@@ -1541,7 +1577,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, pe->e);
+			kmem_cache_free(ubi_wl_entry_slab, pe->e);
 			kfree(pe);
 		}
 	}
@@ -1553,10 +1589,6 @@ static void protection_trees_destroy(struct ubi_device *ubi)
  */
 void ubi_wl_close(struct ubi_device *ubi)
 {
-	dbg_wl("disable \"%s\"", ubi->bgt_name);
-	if (ubi->bgt_thread)
-		kthread_stop(ubi->bgt_thread);
-
 	dbg_wl("close the UBI wear-leveling unit");
 
 	cancel_pending(ubi);
@@ -1565,8 +1597,6 @@ void ubi_wl_close(struct ubi_device *ubi)
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(wl_entries_slab);
 }
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID