author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /fs/jffs2
download	linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.gz
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
Diffstat (limited to 'fs/jffs2')
-rw-r--r--	fs/jffs2/LICENCE		  35
-rw-r--r--	fs/jffs2/Makefile		  18
-rw-r--r--	fs/jffs2/README.Locking		 148
-rw-r--r--	fs/jffs2/TODO			  40
-rw-r--r--	fs/jffs2/background.c		 140
-rw-r--r--	fs/jffs2/build.c		 371
-rw-r--r--	fs/jffs2/compr.c		 469
-rw-r--r--	fs/jffs2/compr.h		 115
-rw-r--r--	fs/jffs2/compr_rtime.c		 132
-rw-r--r--	fs/jffs2/compr_rubin.c		 373
-rw-r--r--	fs/jffs2/compr_rubin.h		  21
-rw-r--r--	fs/jffs2/compr_zlib.c		 218
-rw-r--r--	fs/jffs2/comprtest.c		 307
-rw-r--r--	fs/jffs2/dir.c			 799
-rw-r--r--	fs/jffs2/erase.c		 442
-rw-r--r--	fs/jffs2/file.c			 290
-rw-r--r--	fs/jffs2/fs.c			 677
-rw-r--r--	fs/jffs2/gc.c			1246
-rw-r--r--	fs/jffs2/histo.h		   3
-rw-r--r--	fs/jffs2/histo_mips.h		   2
-rw-r--r--	fs/jffs2/ioctl.c		  23
-rw-r--r--	fs/jffs2/malloc.c		 205
-rw-r--r--	fs/jffs2/nodelist.c		 681
-rw-r--r--	fs/jffs2/nodelist.h		 473
-rw-r--r--	fs/jffs2/nodemgmt.c		 838
-rw-r--r--	fs/jffs2/os-linux.h		 217
-rw-r--r--	fs/jffs2/pushpull.h		  72
-rw-r--r--	fs/jffs2/read.c			 246
-rw-r--r--	fs/jffs2/readinode.c		 695
-rw-r--r--	fs/jffs2/scan.c			 916
-rw-r--r--	fs/jffs2/super.c		 365
-rw-r--r--	fs/jffs2/symlink.c		  45
-rw-r--r--	fs/jffs2/wbuf.c			1184
-rw-r--r--	fs/jffs2/write.c		 708
-rw-r--r--	fs/jffs2/writev.c		  50
35 files changed, 12564 insertions, 0 deletions
diff --git a/fs/jffs2/LICENCE b/fs/jffs2/LICENCE
new file mode 100644
index 000000000000..cd81d83e4ad2
--- /dev/null
+++ b/fs/jffs2/LICENCE
@@ -0,0 +1,35 @@
+The files in this directory and elsewhere which refer to this LICENCE
+file are part of JFFS2, the Journalling Flash File System v2.
+
+	Copyright (C) 2001, 2002 Red Hat, Inc.
+
+JFFS2 is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 or (at your option) any later 
+version.
+
+JFFS2 is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License along
+with JFFS2; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+As a special exception, if other files instantiate templates or use
+macros or inline functions from these files, or you compile these
+files and link them with other works to produce a work based on these
+files, these files do not by themselves cause the resulting work to be
+covered by the GNU General Public License. However the source code for
+these files must still be made available in accordance with section (3)
+of the GNU General Public License.
+
+This exception does not invalidate any other reasons why a work based on
+this file might be covered by the GNU General Public License.
+
+For information on obtaining alternative licences for JFFS2, see 
+http://sources.redhat.com/jffs2/jffs2-licence.html
+
+
+	$Id: LICENCE,v 1.1 2002/05/20 14:56:37 dwmw2 Exp $
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
new file mode 100644
index 000000000000..e3c38ccf9c7d
--- /dev/null
+++ b/fs/jffs2/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux Journalling Flash File System v2 (JFFS2)
+#
+# $Id: Makefile.common,v 1.7 2004/11/03 12:57:38 jwboyer Exp $
+#
+
+obj-$(CONFIG_JFFS2_FS) += jffs2.o
+
+jffs2-y	:= compr.o dir.o file.o ioctl.o nodelist.o malloc.o
+jffs2-y	+= read.o nodemgmt.o readinode.o write.o scan.o gc.o
+jffs2-y	+= symlink.o build.o erase.o background.o fs.o writev.o
+jffs2-y	+= super.o
+
+jffs2-$(CONFIG_JFFS2_FS_NAND)	+= wbuf.o
+jffs2-$(CONFIG_JFFS2_FS_NOR_ECC) += wbuf.o
+jffs2-$(CONFIG_JFFS2_RUBIN)	+= compr_rubin.o
+jffs2-$(CONFIG_JFFS2_RTIME)	+= compr_rtime.o
+jffs2-$(CONFIG_JFFS2_ZLIB)	+= compr_zlib.o
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
new file mode 100644
index 000000000000..49771cf8513a
--- /dev/null
+++ b/fs/jffs2/README.Locking
@@ -0,0 +1,148 @@
+	$Id: README.Locking,v 1.9 2004/11/20 10:35:40 dwmw2 Exp $
+
+	JFFS2 LOCKING DOCUMENTATION
+	---------------------------
+
+At least theoretically, JFFS2 does not require the Big Kernel Lock
+(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
+code. It has its own locking, as described below.
+
+This document attempts to describe the existing locking rules for
+JFFS2. It is not expected to remain perfectly up to date, but ought to
+be fairly close.
+
+
+	alloc_sem
+	---------
+
+The alloc_sem is a per-filesystem semaphore, used primarily to ensure
+contiguous allocation of space on the medium. It is automatically
+obtained during space allocations (jffs2_reserve_space()) and freed
+upon write completion (jffs2_complete_reservation()). Note that
+the garbage collector will obtain this right at the beginning of
+jffs2_garbage_collect_pass() and release it at the end, thereby
+preventing any other write activity on the file system during a
+garbage collect pass.
+
+When writing new nodes, the alloc_sem must be held until the new nodes
+have been properly linked into the data structures for the inode to
+which they belong. This is for the benefit of NAND flash - adding new
+nodes to an inode may obsolete old ones, and by holding the alloc_sem
+until this happens we ensure that any data in the write-buffer at the
+time this happens are part of the new node, not just something that
+was written afterwards. Hence, we can ensure the newly-obsoleted nodes
+don't actually get erased until the write-buffer has been flushed to
+the medium.
+
+With the introduction of NAND flash support and the write-buffer, 
+the alloc_sem is also used to protect the wbuf-related members of the
+jffs2_sb_info structure. Atomically reading the wbuf_len member to see
+if the wbuf is currently holding any data is permitted, though.
+
+Ordering constraints: See f->sem.
+
+
+	File Semaphore f->sem
+	---------------------
+
+This is the JFFS2-internal equivalent of the inode semaphore i->i_sem.
+It protects the contents of the jffs2_inode_info private inode data,
+including the linked list of node fragments (but see the notes below on
+erase_completion_lock), etc.
+
+The reason that the i_sem itself isn't used for this purpose is to
+avoid deadlocks with garbage collection -- the VFS will lock the i_sem
+before calling a function which may need to allocate space. The
+allocation may trigger garbage-collection, which may need to move a
+node belonging to the inode which was locked in the first place by the
+VFS. If the garbage collection code were to attempt to lock the i_sem
+of the inode from which it's garbage-collecting a physical node, this
+would lead to deadlock, unless we played games with unlocking the i_sem
+before calling the space allocation functions.
+
+Instead of playing such games, we just have an extra internal
+semaphore, which is obtained by the garbage collection code and also
+by the normal file system code _after_ allocation of space.
+
+Ordering constraints: 
+
+	1. Never attempt to allocate space or lock alloc_sem with 
+	   any f->sem held.
+	2. Never attempt to lock two file semaphores in one thread.
+	   No ordering rules have been made for doing so.
+
+
+	erase_completion_lock spinlock
+	------------------------------
+
+This is used to serialise access to the eraseblock lists, to the
+per-eraseblock lists of physical jffs2_raw_node_ref structures, and
+(NB) the per-inode list of physical nodes. The latter is a special
+case - see below.
+
+As the MTD API no longer permits erase-completion callback functions
+to be called from bottom-half (timer) context (on the basis that nobody
+ever actually implemented such a thing), it's now sufficient to use
+a simple spin_lock() rather than spin_lock_bh().
+
+Note that the per-inode list of physical nodes (f->nodes) is a special
+case. Any changes to _valid_ nodes (i.e. ->flash_offset & 1 == 0) in
+the list are protected by the file semaphore f->sem. But the erase
+code may remove _obsolete_ nodes from the list while holding only the
+erase_completion_lock. So you can walk the list only while holding the
+erase_completion_lock, and can drop the lock temporarily mid-walk as
+long as the pointer you're holding is to a _valid_ node, not an
+obsolete one.
+
+The erase_completion_lock is also used to protect the c->gc_task
+pointer when the garbage collection thread exits. The code to kill the
+GC thread locks it, sends the signal, then unlocks it - while the GC
+thread itself locks it, zeroes c->gc_task, then unlocks on the exit path.
+
+
+	inocache_lock spinlock
+	----------------------
+
+This spinlock protects the hashed list (c->inocache_list) of the
+in-core jffs2_inode_cache objects (each inode in JFFS2 has the
+corresponding jffs2_inode_cache object). So, the inocache_lock
+has to be locked while walking the c->inocache_list hash buckets.
+
+Note, the f->sem guarantees that the corresponding jffs2_inode_cache
+will not be removed. So, it is allowed to access it without locking
+the inocache_lock spinlock. 
+
+Ordering constraints: 
+
+	If both erase_completion_lock and inocache_lock are needed, the
+	erase_completion_lock has to be acquired first.
+
+
+	erase_free_sem
+	--------------
+
+This semaphore is only used by the erase code which frees obsolete
+node references and the jffs2_garbage_collect_deletion_dirent()
+function. The latter function on NAND flash must read _obsolete_ nodes
+to determine whether the 'deletion dirent' under consideration can be
+discarded or whether it is still required to show that an inode has
+been unlinked. Because reading from the flash may sleep, the
+erase_completion_lock cannot be held, so an alternative, more
+heavyweight lock was required to prevent the erase code from freeing
+the jffs2_raw_node_ref structures in question while the garbage
+collection code is looking at them.
+
+Suggestions for alternative solutions to this problem would be welcomed.
+
+
+	wbuf_sem
+	--------
+
+This read/write semaphore protects against concurrent access to the
+write-behind buffer ('wbuf') used for flash chips where we must write
+in blocks. It protects both the contents of the wbuf and the metadata
+which indicates which flash region (if any) is currently covered by 
+the buffer.
+
+Ordering constraints:
+	Lock wbuf_sem last, after the alloc_sem and/or f->sem.
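
The ordering rules above boil down to: take alloc_sem (via jffs2_reserve_space())
before f->sem, never the other way round, and take wbuf_sem last of all. A minimal
sketch of a compliant write path follows, using the interfaces named in this patch;
jffs2_example_write_path itself and the literal reservation size are illustrative
only, not part of the patch:

/* Sketch only: lock ordering for a hypothetical write path. */
static int jffs2_example_write_path(struct jffs2_sb_info *c,
				    struct jffs2_inode_info *f)
{
	uint32_t phys_ofs, alloclen;
	int ret;

	/* 1. Reserve space first; this takes c->alloc_sem internally. */
	ret = jffs2_reserve_space(c, 128, &phys_ofs, &alloclen, ALLOC_NORMAL);
	if (ret)
		return ret;

	/* 2. Only now take the per-inode semaphore - never allocate
	 *    space while already holding any f->sem. */
	down(&f->sem);

	/* ... write the new node and link it into the inode's lists ... */

	up(&f->sem);

	/* 3. Release alloc_sem only after the node is linked in, so the
	 *    write-buffer contents stay consistent as described above. */
	jffs2_complete_reservation(c);
	return 0;
}
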
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO
new file mode 100644
index 000000000000..2bff82fd221f
--- /dev/null
+++ b/fs/jffs2/TODO
@@ -0,0 +1,40 @@
+$Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $
+
+ - disable compression in commit_write()?
+ - fine-tune the allocation / GC thresholds
+ - chattr support - turning on/off and tuning compression per-inode
+ - checkpointing (do we need this? scan is quite fast)
+ - make the scan code populate real inodes so read_inode just after 
+	mount doesn't have to read the flash twice for large files.
+	Make this a per-inode option, changeable with chattr, so you can
+	decide which inodes should be in-core immediately after mount.
+ - test, test, test
+
+ - NAND flash support:
+	- flush_wbuf using GC to fill it, don't just pad.
+	- Deal with write errors. Data don't get lost - we just have to write 
+	  the affected node(s) out again somewhere else.
+	- make fsync flush only if actually required
+	- make sys_sync() work.
+	- reboot notifier
+	- timed flush of old wbuf
+	- fix magical second arg of jffs2_flush_wbuf(). Split into two or more functions instead.
+
+
+ - Optimisations:
+   - Stop GC from decompressing and immediately recompressing nodes which could
+     just be copied intact. (We now keep track of REF_PRISTINE flag. Easy now.)
+   - Furthermore, in the case where it could be copied intact we don't even need
+     to call iget() for it -- if we use (raw_node_raw->flash_offset & 2) as a flag
+     to show a node can be copied intact and it's _not_ in icache, we could just do
+     it, fix up the next_in_ino list and move on. We would need a way to find out
+     _whether_ it's in icache though -- if it's in icache we also need to do the 
+     fragment lists, etc. P'raps a flag or pointer in the jffs2_inode_cache could
+     help. (We have half of this now.)
+   - Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in 
+     the full dirent, we only need to go to the flash in lookup() when we think we've
+     got a match, and in readdir(). 
+   - Doubly-linked next_in_ino list to allow us to free obsoleted raw_node_refs immediately?
+   - Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into
+	jffs2_mark_node_obsolete(). Can all callers work it out?
+   - Remove size from jffs2_raw_node_frag. 
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
new file mode 100644
index 000000000000..1be6de27dd81
--- /dev/null
+++ b/fs/jffs2/background.c
@@ -0,0 +1,140 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: background.c,v 1.50 2004/11/16 20:36:10 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/jffs2.h>
+#include <linux/mtd/mtd.h>
+#include <linux/completion.h>
+#include "nodelist.h"
+
+
+static int jffs2_garbage_collect_thread(void *);
+
+void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
+{
+	spin_lock(&c->erase_completion_lock);
+        if (c->gc_task && jffs2_thread_should_wake(c))
+                send_sig(SIGHUP, c->gc_task, 1);
+	spin_unlock(&c->erase_completion_lock);
+}
+
+/* This must only ever be called when no GC thread is currently running */
+int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
+{
+	pid_t pid;
+	int ret = 0;
+
+	if (c->gc_task)
+		BUG();
+
+	init_MUTEX_LOCKED(&c->gc_thread_start);
+	init_completion(&c->gc_thread_exit);
+
+	pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
+	if (pid < 0) {
+		printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid);
+		complete(&c->gc_thread_exit);
+		ret = pid;
+	} else {
+		/* Wait for it... */
+		D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
+		down(&c->gc_thread_start);
+	}
+ 
+	return ret;
+}
+
+void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
+{
+	spin_lock(&c->erase_completion_lock);
+	if (c->gc_task) {
+		D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid));
+		send_sig(SIGKILL, c->gc_task, 1);
+	}
+	spin_unlock(&c->erase_completion_lock);
+	wait_for_completion(&c->gc_thread_exit);
+}
+
+static int jffs2_garbage_collect_thread(void *_c)
+{
+	struct jffs2_sb_info *c = _c;
+
+	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
+	allow_signal(SIGKILL);
+	allow_signal(SIGSTOP);
+	allow_signal(SIGCONT);
+
+	c->gc_task = current;
+	up(&c->gc_thread_start);
+
+	set_user_nice(current, 10);
+
+	for (;;) {
+		allow_signal(SIGHUP);
+
+		if (!jffs2_thread_should_wake(c)) {
+			set_current_state (TASK_INTERRUPTIBLE);
+			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
+			/* Yes, there's a race here; we checked jffs2_thread_should_wake()
+			   before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
+			   matter - We don't care if we miss a wakeup, because the GC thread
+			   is only an optimisation anyway. */
+			schedule();
+		}
+
+		if (try_to_freeze(0))
+			continue;
+
+		cond_resched();
+
+		/* Put_super will send a SIGKILL and then wait on the sem. 
+		 */
+		while (signal_pending(current)) {
+			siginfo_t info;
+			unsigned long signr;
+
+			signr = dequeue_signal_lock(current, &current->blocked, &info);
+
+			switch(signr) {
+			case SIGSTOP:
+				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
+				set_current_state(TASK_STOPPED);
+				schedule();
+				break;
+
+			case SIGKILL:
+				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
+				goto die;
+
+			case SIGHUP:
+				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
+				break;
+			default:
+				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
+			}
+		}
+		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
+		disallow_signal(SIGHUP);
+
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
+		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
+			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
+			goto die;
+		}
+	}
+ die:
+	spin_lock(&c->erase_completion_lock);
+	c->gc_task = NULL;
+	spin_unlock(&c->erase_completion_lock);
+	complete_and_exit(&c->gc_thread_exit, 0);
+}
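
Taken together, the entry points above give the rest of the filesystem a small
surface: start the thread once after mount, poke it with
jffs2_garbage_collect_trigger() whenever free space runs low, and stop it at
unmount. A minimal sketch of that lifecycle (jffs2_example_gc_lifecycle is an
illustrative name, not part of the patch):

/* Sketch only: expected lifecycle of the GC thread entry points. */
static int jffs2_example_gc_lifecycle(struct jffs2_sb_info *c)
{
	int ret;

	/* Started once; BUG()s if a GC thread is already running. */
	ret = jffs2_start_garbage_collect_thread(c);
	if (ret)
		return ret;

	/* Write paths call this when space gets tight; it only sends
	 * SIGHUP if jffs2_thread_should_wake(c) agrees. */
	jffs2_garbage_collect_trigger(c);

	/* Unmount sends SIGKILL and waits on gc_thread_exit. */
	jffs2_stop_garbage_collect_thread(c);
	return 0;
}
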
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
new file mode 100644
index 000000000000..a01dd5fdbb95
--- /dev/null
+++ b/fs/jffs2/build.c
@@ -0,0 +1,371 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: build.c,v 1.69 2004/12/16 20:22:18 dmarlin Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+
+static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **);
+
+static inline struct jffs2_inode_cache *
+first_inode_chain(int *i, struct jffs2_sb_info *c)
+{
+	for (; *i < INOCACHE_HASHSIZE; (*i)++) {
+		if (c->inocache_list[*i])
+			return c->inocache_list[*i];
+	}
+	return NULL;
+}
+
+static inline struct jffs2_inode_cache *
+next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
+{
+	/* More in this chain? */
+	if (ic->next)
+		return ic->next;
+	(*i)++;
+	return first_inode_chain(i, c);
+}
+
+#define for_each_inode(i, c, ic)			\
+	for (i = 0, ic = first_inode_chain(&i, (c));	\
+	     ic;					\
+	     ic = next_inode(&i, ic, (c)))
+
+
+static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+	struct jffs2_full_dirent *fd;
+
+	D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino));
+
+	/* For each child, increase nlink */
+	for(fd = ic->scan_dents; fd; fd = fd->next) {
+		struct jffs2_inode_cache *child_ic;
+		if (!fd->ino)
+			continue;
+
+		/* XXX: Can get high latency here with huge directories */
+
+		child_ic = jffs2_get_ino_cache(c, fd->ino);
+		if (!child_ic) {
+			printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
+				  fd->name, fd->ino, ic->ino);
+			jffs2_mark_node_obsolete(c, fd->raw);
+			continue;
+		}
+
+		if (child_ic->nlink++ && fd->type == DT_DIR) {
+			printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
+			if (fd->ino == 1 && ic->ino == 1) {
+				printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
+				printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
+			}
+			/* What do we do about it? */
+		}
+		D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
+		/* Can't free them. We might need them in pass 2 */
+	}
+}
+
+/* Scan plan:
+ - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
+ - Scan directory tree from top down, setting nlink in inocaches
+ - Scan inocaches for inodes with nlink==0
+*/
+static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+{
+	int ret;
+	int i;
+	struct jffs2_inode_cache *ic;
+	struct jffs2_full_dirent *fd;
+	struct jffs2_full_dirent *dead_fds = NULL;
+
+	/* First, scan the medium and build all the inode caches with
+	   lists of physical nodes */
+
+	c->flags |= JFFS2_SB_FLAG_MOUNTING;
+	ret = jffs2_scan_medium(c);
+	if (ret)
+		goto exit;
+
+	D1(printk(KERN_DEBUG "Scanned flash completely\n"));
+	D2(jffs2_dump_block_lists(c));
+
+	/* Now scan the directory tree, increasing nlink according to every dirent found. */
+	for_each_inode(i, c, ic) {
+		D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));
+
+		D1(BUG_ON(ic->ino > c->highest_ino));
+
+		if (ic->scan_dents) {
+			jffs2_build_inode_pass1(c, ic);
+			cond_resched();
+		}
+	}
+	c->flags &= ~JFFS2_SB_FLAG_MOUNTING;
+
+	D1(printk(KERN_DEBUG "Pass 1 complete\n"));
+
+	/* Next, scan for inodes with nlink == 0 and remove them. If
+	   they were directories, then decrement the nlink of their
+	   children too, and repeat the scan. As that's going to be
+	   a fairly uncommon occurrence, it's not so evil to do it this
+	   way. Recursion bad. */
+	D1(printk(KERN_DEBUG "Pass 2 starting\n"));
+
+	for_each_inode(i, c, ic) {
+		D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
+		if (ic->nlink)
+			continue;
+			
+		jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
+		cond_resched();
+	} 
+
+	D1(printk(KERN_DEBUG "Pass 2a starting\n"));
+
+	while (dead_fds) {
+		fd = dead_fds;
+		dead_fds = fd->next;
+
+		ic = jffs2_get_ino_cache(c, fd->ino);
+		D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic));
+
+		if (ic)
+			jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
+		jffs2_free_full_dirent(fd);
+	}
+
+	D1(printk(KERN_DEBUG "Pass 2 complete\n"));
+	
+	/* Finally, we can scan again and free the dirent structs */
+	for_each_inode(i, c, ic) {
+		D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));
+
+		while(ic->scan_dents) {
+			fd = ic->scan_dents;
+			ic->scan_dents = fd->next;
+			jffs2_free_full_dirent(fd);
+		}
+		ic->scan_dents = NULL;
+		cond_resched();
+	}
+	D1(printk(KERN_DEBUG "Pass 3 complete\n"));
+	D2(jffs2_dump_block_lists(c));
+
+	/* Rotate the lists by some number to ensure wear levelling */
+	jffs2_rotate_lists(c);
+
+	ret = 0;
+
+exit:
+	if (ret) {
+		for_each_inode(i, c, ic) {
+			while(ic->scan_dents) {
+				fd = ic->scan_dents;
+				ic->scan_dents = fd->next;
+				jffs2_free_full_dirent(fd);
+			}
+		}
+	}
+
+	return ret;
+}
+
+static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dirent *fd;
+
+	D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));
+	
+	raw = ic->nodes;
+	while (raw != (void *)ic) {
+		struct jffs2_raw_node_ref *next = raw->next_in_ino;
+		D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw)));
+		jffs2_mark_node_obsolete(c, raw);
+		raw = next;
+	}
+
+	if (ic->scan_dents) {
+		int whinged = 0;
+		D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino));
+
+		while(ic->scan_dents) {
+			struct jffs2_inode_cache *child_ic;
+
+			fd = ic->scan_dents;
+			ic->scan_dents = fd->next;
+
+			if (!fd->ino) {
+				/* It's a deletion dirent. Ignore it */
+				D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name));
+				jffs2_free_full_dirent(fd);
+				continue;
+			}
+			if (!whinged) {
+				whinged = 1;
+				printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
+			}
+
+			D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n",
+				  fd->name, fd->ino));
+			
+			child_ic = jffs2_get_ino_cache(c, fd->ino);
+			if (!child_ic) {
+				printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
+				jffs2_free_full_dirent(fd);
+				continue;
+			}
+
+			/* Reduce nlink of the child. If it's now zero, stick it on the 
+			   dead_fds list to be cleaned up later. Else just free the fd */
+
+			child_ic->nlink--;
+			
+			if (!child_ic->nlink) {
+				D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n",
+					  fd->ino, fd->name));
+				fd->next = *dead_fds;
+				*dead_fds = fd;
+			} else {
+				D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
+					  fd->ino, fd->name, child_ic->nlink));
+				jffs2_free_full_dirent(fd);
+			}
+		}
+	}
+
+	/*
+	   We don't delete the inocache from the hash list and free it yet. 
+	   The erase code will do that, when all the nodes are completely gone.
+	*/
+}
+
+static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
+{
+	uint32_t size;
+
+	/* Deletion should almost _always_ be allowed. We're fairly
+	   buggered once we stop allowing people to delete stuff
+	   because there's not enough free space... */
+	c->resv_blocks_deletion = 2;
+
+	/* Be conservative about how much space we need before we allow writes. 
+	   On top of that which is required for deletia, require an extra 2%
+	   of the medium to be available, for overhead caused by nodes being
+	   split across blocks, etc. */
+
+	size = c->flash_size / 50; /* 2% of flash size */
+	size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
+	size += c->sector_size - 1; /* ... and round up */
+
+	c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
+
+	/* When do we let the GC thread run in the background */
+
+	c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
+
+	/* When do we allow garbage collection to merge nodes to make 
+	   long-term progress at the expense of short-term space exhaustion? */
+	c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
+
+	/* When do we allow garbage collection to eat from bad blocks rather
+	   than actually making progress? */
+	c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
+
+	/* If there's less than this amount of dirty space, don't bother
+	   trying to GC to make more space. It'll be a fruitless task */
+	c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
+
+	D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
+		  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks));
+	D1(printk(KERN_DEBUG "Blocks required to allow deletion:    %d (%d KiB)\n",
+		  c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Blocks required to allow writes:      %d (%d KiB)\n",
+		  c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n",
+		  c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Blocks required to allow GC merges:   %d (%d KiB)\n",
+		  c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Blocks required to GC bad blocks:     %d (%d KiB)\n",
+		  c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024));
+	D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n",
+		  c->nospc_dirty_size));
+} 
+
+int jffs2_do_mount_fs(struct jffs2_sb_info *c)
+{
+	int i;
+
+	c->free_size = c->flash_size;
+	c->nr_blocks = c->flash_size / c->sector_size;
+ 	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+		c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks);
+	else
+		c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
+	if (!c->blocks)
+		return -ENOMEM;
+	for (i=0; i<c->nr_blocks; i++) {
+		INIT_LIST_HEAD(&c->blocks[i].list);
+		c->blocks[i].offset = i * c->sector_size;
+		c->blocks[i].free_size = c->sector_size;
+		c->blocks[i].dirty_size = 0;
+		c->blocks[i].wasted_size = 0;
+		c->blocks[i].unchecked_size = 0;
+		c->blocks[i].used_size = 0;
+		c->blocks[i].first_node = NULL;
+		c->blocks[i].last_node = NULL;
+		c->blocks[i].bad_count = 0;
+	}
+
+	init_MUTEX(&c->alloc_sem);
+	init_MUTEX(&c->erase_free_sem);
+	init_waitqueue_head(&c->erase_wait);
+	init_waitqueue_head(&c->inocache_wq);
+	spin_lock_init(&c->erase_completion_lock);
+	spin_lock_init(&c->inocache_lock);
+
+	INIT_LIST_HEAD(&c->clean_list);
+	INIT_LIST_HEAD(&c->very_dirty_list);
+	INIT_LIST_HEAD(&c->dirty_list);
+	INIT_LIST_HEAD(&c->erasable_list);
+	INIT_LIST_HEAD(&c->erasing_list);
+	INIT_LIST_HEAD(&c->erase_pending_list);
+	INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
+	INIT_LIST_HEAD(&c->erase_complete_list);
+	INIT_LIST_HEAD(&c->free_list);
+	INIT_LIST_HEAD(&c->bad_list);
+	INIT_LIST_HEAD(&c->bad_used_list);
+	c->highest_ino = 1;
+
+	if (jffs2_build_filesystem(c)) {
+		D1(printk(KERN_DEBUG "build_fs failed\n"));
+		jffs2_free_ino_caches(c);
+		jffs2_free_raw_node_refs(c);
+		if (c->mtd->flags & MTD_NO_VIRTBLOCKS) {
+			vfree(c->blocks);
+		} else {
+			kfree(c->blocks);
+		}
+		return -EIO;
+	}
+
+	jffs2_calc_trigger_levels(c);
+
+	return 0;
+}
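
The reservation thresholds computed by jffs2_calc_trigger_levels() are easiest
to follow with concrete numbers. The standalone userspace sketch below
reproduces the same arithmetic for a hypothetical 16 MiB device with 64 KiB
eraseblocks (the geometry is invented purely for illustration):

/* Userspace sketch of the trigger-level arithmetic above, for a
 * hypothetical 16 MiB flash with 64 KiB eraseblocks. */
#include <stdio.h>

int main(void)
{
	unsigned flash_size = 16 * 1024 * 1024;
	unsigned sector_size = 64 * 1024;
	unsigned nr_blocks = flash_size / sector_size;	/* 256 */
	unsigned resv_blocks_deletion = 2;
	unsigned size, resv_blocks_write, resv_blocks_gctrigger, nospc_dirty_size;

	size = flash_size / 50;		/* 2% of flash size         */
	size += nr_blocks * 100;	/* plus 100 bytes per block */
	size += sector_size - 1;	/* round up to whole blocks */

	resv_blocks_write = resv_blocks_deletion + size / sector_size;
	resv_blocks_gctrigger = resv_blocks_write + 1;
	nospc_dirty_size = sector_size + flash_size / 100;

	/* Prints: deletion=2 write=8 gctrigger=9 nospc_dirty=233308 */
	printf("deletion=%u write=%u gctrigger=%u nospc_dirty=%u\n",
	       resv_blocks_deletion, resv_blocks_write,
	       resv_blocks_gctrigger, nospc_dirty_size);
	return 0;
}
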
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
new file mode 100644
index 000000000000..af922a9618ac
--- /dev/null
+++ b/fs/jffs2/compr.c
@@ -0,0 +1,469 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ * Created by Arjan van de Ven <arjanv@redhat.com>
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ *                    University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: compr.c,v 1.42 2004/08/07 21:56:08 dwmw2 Exp $
+ *
+ */
+
+#include "compr.h"
+
+static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
+
+/* Available compressors are on this list */
+static LIST_HEAD(jffs2_compressor_list);
+
+/* Actual compression mode */
+static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
+
+/* Statistics for blocks stored without compression */
+static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_compr_size=0;
+
+/* jffs2_compress:
+ * @data: Pointer to uncompressed data
+ * @cdata: Pointer to returned pointer to buffer for compressed data
+ * @datalen: On entry, holds the amount of data available for compression.
+ *	On exit, expected to hold the amount of data actually compressed.
+ * @cdatalen: On entry, holds the amount of space available for compressed
+ *	data. On exit, expected to hold the actual size of the compressed
+ *	data.
+ *
+ * Returns: Lower byte to be stored with data indicating compression type used.
+ * Zero is used to show that the data could not be compressed - the 
+ * compressed version was actually larger than the original.
+ * Upper byte will be used later. (soon)
+ *
+ * If the cdata buffer isn't large enough to hold all the uncompressed data,
+ * jffs2_compress should compress as much as will fit, and should set 
+ * *datalen accordingly to show the amount of data which were compressed.
+ */
+uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			     unsigned char *data_in, unsigned char **cpage_out, 
+			     uint32_t *datalen, uint32_t *cdatalen)
+{
+	int ret = JFFS2_COMPR_NONE;
+        int compr_ret;
+        struct jffs2_compressor *this, *best=NULL;
+        unsigned char *output_buf = NULL, *tmp_buf;
+        uint32_t orig_slen, orig_dlen;
+        uint32_t best_slen=0, best_dlen=0;
+
+        switch (jffs2_compression_mode) {
+        case JFFS2_COMPR_MODE_NONE:
+                break;
+        case JFFS2_COMPR_MODE_PRIORITY:
+                output_buf = kmalloc(*cdatalen,GFP_KERNEL);
+                if (!output_buf) {
+                        printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
+                        goto out;
+                }
+                orig_slen = *datalen;
+                orig_dlen = *cdatalen;
+                spin_lock(&jffs2_compressor_list_lock);
+                list_for_each_entry(this, &jffs2_compressor_list, list) {
+                        /* Skip decompress-only backwards-compatibility and disabled modules */
+                        if ((!this->compress)||(this->disabled))
+                                continue;
+
+                        this->usecount++;
+                        spin_unlock(&jffs2_compressor_list_lock);
+                        *datalen  = orig_slen;
+                        *cdatalen = orig_dlen;
+                        compr_ret = this->compress(data_in, output_buf, datalen, cdatalen, NULL);
+                        spin_lock(&jffs2_compressor_list_lock);
+                        this->usecount--;
+                        if (!compr_ret) {
+                                ret = this->compr;
+                                this->stat_compr_blocks++;
+                                this->stat_compr_orig_size += *datalen;
+                                this->stat_compr_new_size  += *cdatalen;
+                                break;
+                        }
+                }
+                spin_unlock(&jffs2_compressor_list_lock);
+                if (ret == JFFS2_COMPR_NONE) kfree(output_buf);
+                break;
+        case JFFS2_COMPR_MODE_SIZE:
+                orig_slen = *datalen;
+                orig_dlen = *cdatalen;
+                spin_lock(&jffs2_compressor_list_lock);
+                list_for_each_entry(this, &jffs2_compressor_list, list) {
+                        /* Skip decompress-only backwards-compatibility and disabled modules */
+                        if ((!this->compress)||(this->disabled))
+                                continue;
+                        /* Allocating memory for output buffer if necessary */
+                        if ((this->compr_buf_size<orig_dlen)&&(this->compr_buf)) {
+                                spin_unlock(&jffs2_compressor_list_lock);
+                                kfree(this->compr_buf);
+                                spin_lock(&jffs2_compressor_list_lock);
+                                this->compr_buf_size=0;
+                                this->compr_buf=NULL;
+                        }
+                        if (!this->compr_buf) {
+                                spin_unlock(&jffs2_compressor_list_lock);
+                                tmp_buf = kmalloc(orig_dlen,GFP_KERNEL);
+                                spin_lock(&jffs2_compressor_list_lock);
+                                if (!tmp_buf) {
+                                        printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n",orig_dlen);
+                                        continue;
+                                }
+                                else {
+                                        this->compr_buf = tmp_buf;
+                                        this->compr_buf_size = orig_dlen;
+                                }
+                        }
+                        this->usecount++;
+                        spin_unlock(&jffs2_compressor_list_lock);
+                        *datalen  = orig_slen;
+                        *cdatalen = orig_dlen;
+                        compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen, NULL);
+                        spin_lock(&jffs2_compressor_list_lock);
+                        this->usecount--;
+                        if (!compr_ret) {
+                                if ((!best_dlen)||(best_dlen>*cdatalen)) {
+                                        best_dlen = *cdatalen;
+                                        best_slen = *datalen;
+                                        best = this;
+                                }
+                        }
+                }
+                if (best_dlen) {
+                        *cdatalen = best_dlen;
+                        *datalen  = best_slen;
+                        output_buf = best->compr_buf;
+                        best->compr_buf = NULL;
+                        best->compr_buf_size = 0;
+                        best->stat_compr_blocks++;
+                        best->stat_compr_orig_size += best_slen;
+                        best->stat_compr_new_size  += best_dlen;
+                        ret = best->compr;
+                }
+                spin_unlock(&jffs2_compressor_list_lock);
+                break;
+        default:
+                printk(KERN_ERR "JFFS2: unknown compression mode.\n");
+        }
+ out:
+        if (ret == JFFS2_COMPR_NONE) {
+	        *cpage_out = data_in;
+	        *datalen = *cdatalen;
+                none_stat_compr_blocks++;
+                none_stat_compr_size += *datalen;
+        }
+        else {
+                *cpage_out = output_buf;
+        }
+	return ret;
+}
+
+int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+		     uint16_t comprtype, unsigned char *cdata_in, 
+		     unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
+{
+        struct jffs2_compressor *this;
+        int ret;
+
+	/* Older code had a bug where it would write non-zero 'usercompr'
+	   fields. Deal with it. */
+	if ((comprtype & 0xff) <= JFFS2_COMPR_ZLIB)
+		comprtype &= 0xff;
+
+	switch (comprtype & 0xff) {
+	case JFFS2_COMPR_NONE:
+		/* This should be special-cased elsewhere, but we might as well deal with it */
+		memcpy(data_out, cdata_in, datalen);
+                none_stat_decompr_blocks++;
+		break;
+	case JFFS2_COMPR_ZERO:
+		memset(data_out, 0, datalen);
+		break;
+	default:
+                spin_lock(&jffs2_compressor_list_lock);
+                list_for_each_entry(this, &jffs2_compressor_list, list) {
+                        if (comprtype == this->compr) {
+                                this->usecount++;
+                                spin_unlock(&jffs2_compressor_list_lock);
+                                ret = this->decompress(cdata_in, data_out, cdatalen, datalen, NULL);
+                                spin_lock(&jffs2_compressor_list_lock);
+                                if (ret) {
+                                        printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret);
+                                }
+                                else {
+                                        this->stat_decompr_blocks++;
+                                }
+                                this->usecount--;
+                                spin_unlock(&jffs2_compressor_list_lock);
+                                return ret;
+                        }
+                }
+		printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype);
+                spin_unlock(&jffs2_compressor_list_lock);
+		return -EIO;
+	}
+	return 0;
+}
+
+int jffs2_register_compressor(struct jffs2_compressor *comp)
+{
+        struct jffs2_compressor *this;
+
+        if (!comp->name) {
+                printk(KERN_WARNING "NULL compressor name when registering JFFS2 compressor. Failed.\n");
+                return -1;
+        }
+        comp->compr_buf_size=0;
+        comp->compr_buf=NULL;
+        comp->usecount=0;
+        comp->stat_compr_orig_size=0;
+        comp->stat_compr_new_size=0;
+        comp->stat_compr_blocks=0;
+        comp->stat_decompr_blocks=0;
+        D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name));
+
+        spin_lock(&jffs2_compressor_list_lock);
+
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                if (this->priority < comp->priority) {
+                        list_add(&comp->list, this->list.prev);
+                        goto out;
+                }
+        }
+        list_add_tail(&comp->list, &jffs2_compressor_list);
+out:
+        D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
+                printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
+        })
+
+        spin_unlock(&jffs2_compressor_list_lock);
+
+        return 0;
+}
+
+int jffs2_unregister_compressor(struct jffs2_compressor *comp)
+{
+        D2(struct jffs2_compressor *this;)
+
+        D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name));
+
+        spin_lock(&jffs2_compressor_list_lock);
+
+        if (comp->usecount) {
+                spin_unlock(&jffs2_compressor_list_lock);
+                printk(KERN_WARNING "JFFS2: Compressor module is in use. Unregister failed.\n");
+                return -1;
+        }
+        list_del(&comp->list);
+
+        D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
+                printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
+        })
+        spin_unlock(&jffs2_compressor_list_lock);
+        return 0;
+}
+
+#ifdef CONFIG_JFFS2_PROC
+
+#define JFFS2_STAT_BUF_SIZE 16000
+
+char *jffs2_list_compressors(void)
+{
+        struct jffs2_compressor *this;
+        char *buf, *act_buf;
+
+        act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                act_buf += sprintf(act_buf, "%10s priority:%d ", this->name, this->priority);
+                if ((this->disabled)||(!this->compress))
+                        act_buf += sprintf(act_buf,"disabled");
+                else
+                        act_buf += sprintf(act_buf,"enabled");
+                act_buf += sprintf(act_buf,"\n");
+        }
+        return buf;
+}
+
+char *jffs2_stats(void)
+{
+        struct jffs2_compressor *this;
+        char *buf, *act_buf;
+
+        act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL);
+
+        act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n");
+        act_buf += sprintf(act_buf,"%10s   ","none");
+        act_buf += sprintf(act_buf,"compr: %d blocks (%d)  decompr: %d blocks\n", none_stat_compr_blocks, 
+                           none_stat_compr_size, none_stat_decompr_blocks);
+        spin_lock(&jffs2_compressor_list_lock);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                act_buf += sprintf(act_buf,"%10s ",this->name);
+                if ((this->disabled)||(!this->compress))
+                        act_buf += sprintf(act_buf,"- ");
+                else
+                        act_buf += sprintf(act_buf,"+ ");
+                act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d)  decompr: %d blocks ", this->stat_compr_blocks, 
+                                   this->stat_compr_new_size, this->stat_compr_orig_size, 
+                                   this->stat_decompr_blocks);
+                act_buf += sprintf(act_buf,"\n");
+        }
+        spin_unlock(&jffs2_compressor_list_lock);
+
+        return buf;
+}
+
+char *jffs2_get_compression_mode_name(void) 
+{
+        switch (jffs2_compression_mode) {
+        case JFFS2_COMPR_MODE_NONE:
+                return "none";
+        case JFFS2_COMPR_MODE_PRIORITY:
+                return "priority";
+        case JFFS2_COMPR_MODE_SIZE:
+                return "size";
+        }
+        return "unknown";
+}
+
+int jffs2_set_compression_mode_name(const char *name) 
+{
+        if (!strcmp("none",name)) {
+                jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+                return 0;
+        }
+        if (!strcmp("priority",name)) {
+                jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
+                return 0;
+        }
+        if (!strcmp("size",name)) {
+                jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
+                return 0;
+        }
+        return 1;
+}
+
+static int jffs2_compressor_Xable(const char *name, int disabled)
+{
+        struct jffs2_compressor *this;
+        spin_lock(&jffs2_compressor_list_lock);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                if (!strcmp(this->name, name)) {
+                        this->disabled = disabled;
+                        spin_unlock(&jffs2_compressor_list_lock);
+                        return 0;                        
+                }
+        }
+        spin_unlock(&jffs2_compressor_list_lock);
+        printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
+        return 1;
+}
+
+int jffs2_enable_compressor_name(const char *name)
+{
+        return jffs2_compressor_Xable(name, 0);
+}
+
+int jffs2_disable_compressor_name(const char *name)
+{
+        return jffs2_compressor_Xable(name, 1);
+}
+
+int jffs2_set_compressor_priority(const char *name, int priority)
+{
+        struct jffs2_compressor *this,*comp;
+        spin_lock(&jffs2_compressor_list_lock);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                if (!strcmp(this->name, name)) {
+                        this->priority = priority;
+                        comp = this;
+                        goto reinsert;
+                }
+        }
+        spin_unlock(&jffs2_compressor_list_lock);
+        printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);        
+        return 1;
+reinsert:
+        /* list is sorted in the order of priority, so if
+           we change it we have to reinsert it into the
+           right place */
+        list_del(&comp->list);
+        list_for_each_entry(this, &jffs2_compressor_list, list) {
+                if (this->priority < comp->priority) {
+                        list_add(&comp->list, this->list.prev);
+                        spin_unlock(&jffs2_compressor_list_lock);
+                        return 0;
+                }
+        }
+        list_add_tail(&comp->list, &jffs2_compressor_list);
+        spin_unlock(&jffs2_compressor_list_lock);
+        return 0;
+}
+
+#endif
+
+void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
+{
+        if (orig != comprbuf)
+                kfree(comprbuf);
+}
+
+int jffs2_compressors_init(void) 
+{
+/* Registering compressors */
+#ifdef CONFIG_JFFS2_ZLIB
+        jffs2_zlib_init();
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+        jffs2_rtime_init();
+#endif
+#ifdef CONFIG_JFFS2_RUBIN
+        jffs2_rubinmips_init();
+        jffs2_dynrubin_init();
+#endif
+#ifdef CONFIG_JFFS2_LZARI
+        jffs2_lzari_init();
+#endif
+#ifdef CONFIG_JFFS2_LZO
+        jffs2_lzo_init();
+#endif
+/* Setting default compression mode */
+#ifdef CONFIG_JFFS2_CMODE_NONE
+        jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+        D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");)
+#else
+#ifdef CONFIG_JFFS2_CMODE_SIZE
+        jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
+        D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");)
+#else
+        D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");)
+#endif
+#endif
+        return 0;
+}
+
+int jffs2_compressors_exit(void) 
+{
+/* Unregistering compressors */
+#ifdef CONFIG_JFFS2_LZO
+        jffs2_lzo_exit();
+#endif
+#ifdef CONFIG_JFFS2_LZARI
+        jffs2_lzari_exit();
+#endif
+#ifdef CONFIG_JFFS2_RUBIN
+        jffs2_dynrubin_exit();
+        jffs2_rubinmips_exit();
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+        jffs2_rtime_exit();
+#endif
+#ifdef CONFIG_JFFS2_ZLIB
+        jffs2_zlib_exit();
+#endif
+        return 0;
+}
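
The calling convention documented above jffs2_compress() is worth spelling out:
the return value is the compression type to store in the node, *datalen and
*cdatalen are updated to what was actually consumed and produced, and the
output buffer must be released with jffs2_free_comprbuf() because it may simply
alias the input. A sketch of a caller, with the function name and the
page-sized buffers assumed for illustration only:

/* Sketch only: how a write path is expected to drive jffs2_compress(). */
static void jffs2_example_compress_call(struct jffs2_sb_info *c,
					struct jffs2_inode_info *f,
					unsigned char *page_buf)
{
	unsigned char *comprbuf;
	uint32_t datalen = PAGE_CACHE_SIZE;	/* input offered */
	uint32_t cdatalen = PAGE_CACHE_SIZE;	/* output space available */
	uint16_t comprtype;

	comprtype = jffs2_compress(c, f, page_buf, &comprbuf,
				   &datalen, &cdatalen);

	/* The low byte of comprtype goes into the node header; datalen is
	 * how much input was consumed, cdatalen the size of the output.
	 * If nothing helped, comprtype is JFFS2_COMPR_NONE and comprbuf
	 * just aliases page_buf. */

	/* ... write cdatalen bytes from comprbuf into the node ... */

	jffs2_free_comprbuf(comprbuf, page_buf);
}
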
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
new file mode 100644
index 000000000000..89ceeed201eb
--- /dev/null
+++ b/fs/jffs2/compr.h
@@ -0,0 +1,115 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ *                    University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in the 
+ * jffs2 directory.
+ *
+ * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
+ *
+ */
+
+#ifndef __JFFS2_COMPR_H__
+#define __JFFS2_COMPR_H__
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/jffs2.h>
+#include <linux/jffs2_fs_i.h>
+#include <linux/jffs2_fs_sb.h>
+#include "nodelist.h"
+
+#define JFFS2_RUBINMIPS_PRIORITY 10
+#define JFFS2_DYNRUBIN_PRIORITY  20
+#define JFFS2_LZARI_PRIORITY     30
+#define JFFS2_LZO_PRIORITY       40
+#define JFFS2_RTIME_PRIORITY     50
+#define JFFS2_ZLIB_PRIORITY      60
+
+#define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
+#define JFFS2_DYNRUBIN_DISABLED  /*        for decompression */
+
+#define JFFS2_COMPR_MODE_NONE       0
+#define JFFS2_COMPR_MODE_PRIORITY   1
+#define JFFS2_COMPR_MODE_SIZE       2
+
+struct jffs2_compressor {
+        struct list_head list;
+        int priority;              /* used by priority compr. mode */
+        char *name;
+        char compr;                /* JFFS2_COMPR_XXX */
+        int (*compress)(unsigned char *data_in, unsigned char *cpage_out,
+                        uint32_t *srclen, uint32_t *destlen, void *model);
+        int (*decompress)(unsigned char *cdata_in, unsigned char *data_out,
+                        uint32_t cdatalen, uint32_t datalen, void *model);
+        int usecount;
+        int disabled;              /* if set, the compressor won't compress */
+        unsigned char *compr_buf;  /* used by size compr. mode */
+        uint32_t compr_buf_size;   /* used by size compr. mode */
+        uint32_t stat_compr_orig_size;
+        uint32_t stat_compr_new_size;
+        uint32_t stat_compr_blocks;
+        uint32_t stat_decompr_blocks;
+};
+
+int jffs2_register_compressor(struct jffs2_compressor *comp);
+int jffs2_unregister_compressor(struct jffs2_compressor *comp);
+
+int jffs2_compressors_init(void);
+int jffs2_compressors_exit(void);
+
+uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+                             unsigned char *data_in, unsigned char **cpage_out,
+                             uint32_t *datalen, uint32_t *cdatalen);
+
+int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+                     uint16_t comprtype, unsigned char *cdata_in,
+                     unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
+
+void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig);
+
+#ifdef CONFIG_JFFS2_PROC
+int jffs2_enable_compressor_name(const char *name);
+int jffs2_disable_compressor_name(const char *name);
+int jffs2_set_compression_mode_name(const char *mode_name);
+char *jffs2_get_compression_mode_name(void);
+int jffs2_set_compressor_priority(const char *mode_name, int priority);
+char *jffs2_list_compressors(void);
+char *jffs2_stats(void);
+#endif
+
+/* Compressor modules */
+/* These functions will be called by jffs2_compressors_init/exit */
+
+#ifdef CONFIG_JFFS2_RUBIN
+int jffs2_rubinmips_init(void);
+void jffs2_rubinmips_exit(void);
+int jffs2_dynrubin_init(void);
+void jffs2_dynrubin_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+int jffs2_rtime_init(void);
+void jffs2_rtime_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_ZLIB
+int jffs2_zlib_init(void);
+void jffs2_zlib_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_LZARI
+int jffs2_lzari_init(void);
+void jffs2_lzari_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_LZO
+int jffs2_lzo_init(void);
+void jffs2_lzo_exit(void);
+#endif
+
+#endif /* __JFFS2_COMPR_H__ */
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
new file mode 100644
index 000000000000..393129418666
--- /dev/null
+++ b/fs/jffs2/compr_rtime.c
@@ -0,0 +1,132 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by Arjan van de Ven <arjanv@redhat.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: compr_rtime.c,v 1.14 2004/06/23 16:34:40 havasi Exp $
+ *
+ *
+ * Very simple lz77-ish encoder.
+ *
+ * Theory of operation: Both encoder and decoder have a list of "last
+ * occurrences" for every possible source-value; after sending the
+ * first source-byte, the second byte indicated the "run" length of
+ * matches
+ *
+ * The algorithm is intended to only send "whole bytes", no bit-messing.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h> 
+#include <linux/jffs2.h> 
+#include "compr.h"
+
+/* _compress returns the compressed size, -1 if bigger */
+static int jffs2_rtime_compress(unsigned char *data_in,
+				unsigned char *cpage_out,
+				uint32_t *sourcelen, uint32_t *dstlen,
+				void *model)
+{
+	short positions[256];
+	int outpos = 0;
+	int pos=0;
+
+	memset(positions,0,sizeof(positions)); 
+	
+	while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
+		int backpos, runlen=0;
+		unsigned char value;
+		
+		value = data_in[pos];
+
+		cpage_out[outpos++] = data_in[pos++];
+		
+		backpos = positions[value];
+		positions[value]=pos;
+		
+		while ((backpos < pos) && (pos < (*sourcelen)) &&
+		       (data_in[pos]==data_in[backpos++]) && (runlen<255)) {
+			pos++;
+			runlen++;
+		}
+		cpage_out[outpos++] = runlen;
+	}
+
+	if (outpos >= pos) {
+		/* We failed */
+		return -1;
+	}
+	
+	/* Tell the caller how much we managed to compress, and how much space it took */
+	*sourcelen = pos;
+	*dstlen = outpos;
+	return 0;
+}		   
+
+
+static int jffs2_rtime_decompress(unsigned char *data_in,
+				  unsigned char *cpage_out,
+				  uint32_t srclen, uint32_t destlen,
+				  void *model)
+{
+	short positions[256];
+	int outpos = 0;
+	int pos=0;
+	
+	memset(positions,0,sizeof(positions)); 
+	
+	while (outpos<destlen) {
+		unsigned char value;
+		int backoffs;
+		int repeat;
+		
+		value = data_in[pos++];
+		cpage_out[outpos++] = value; /* first the verbatim copied byte */
+		repeat = data_in[pos++];
+		backoffs = positions[value];
+		
+		positions[value]=outpos;
+		if (repeat) {
+			if (backoffs + repeat >= outpos) {
+				while(repeat) {
+					cpage_out[outpos++] = cpage_out[backoffs++];
+					repeat--;
+				}
+			} else {
+				memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);
+				outpos+=repeat;		
+			}
+		}
+	}
+        return 0;
+}		   
+
+static struct jffs2_compressor jffs2_rtime_comp = {
+    .priority = JFFS2_RTIME_PRIORITY,
+    .name = "rtime",
+    .compr = JFFS2_COMPR_RTIME,
+    .compress = &jffs2_rtime_compress,
+    .decompress = &jffs2_rtime_decompress,
+#ifdef JFFS2_RTIME_DISABLED
+    .disabled = 1,
+#else
+    .disabled = 0,
+#endif
+};
+
+int jffs2_rtime_init(void)
+{
+    return jffs2_register_compressor(&jffs2_rtime_comp);
+}
+
+void jffs2_rtime_exit(void)
+{
+    jffs2_unregister_compressor(&jffs2_rtime_comp);
+}
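
The "theory of operation" comment at the top of compr_rtime.c is easiest to see
on a concrete input. The standalone userspace adaptation below (illustration
only, not part of the patch) runs the same compress loop on "abababab" and
shows it shrinking to the two pairs ('a', 0) and ('b', 6):

/* Userspace illustration of the rtime scheme above: each output pair is
 * (literal byte, run length of the match against that byte's previous
 * occurrence). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char in[] = "abababab";
	int srclen = 8;
	unsigned char out[16];
	short positions[256];
	int pos = 0, outpos = 0;

	memset(positions, 0, sizeof(positions));

	while (pos < srclen && outpos <= (int)sizeof(out) - 2) {
		unsigned char value = in[pos];
		int backpos, runlen = 0;

		out[outpos++] = in[pos++];
		backpos = positions[value];
		positions[value] = pos;

		while (backpos < pos && pos < srclen &&
		       in[pos] == in[backpos++] && runlen < 255) {
			pos++;
			runlen++;
		}
		out[outpos++] = runlen;
	}

	/* Prints the pairs ('a', 0) and ('b', 6): 8 input bytes became 4. */
	for (pos = 0; pos < outpos; pos += 2)
		printf("'%c' %d\n", out[pos], out[pos + 1]);
	return 0;
}
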
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
new file mode 100644
index 000000000000..450d6624181f
--- /dev/null
+++ b/fs/jffs2/compr_rubin.c
@@ -0,0 +1,373 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by Arjan van de Ven <arjanv@redhat.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: compr_rubin.c,v 1.20 2004/06/23 16:34:40 havasi Exp $
+ *
+ */
+
+ 
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/jffs2.h>
+#include "compr_rubin.h"
+#include "histo_mips.h"
+#include "compr.h"
+
+static void init_rubin(struct rubin_state *rs, int div, int *bits)
+{	
+	int c;
+
+	rs->q = 0;
+	rs->p = (long) (2 * UPPER_BIT_RUBIN);
+	rs->bit_number = (long) 0;
+	rs->bit_divider = div;
+	for (c=0; c<8; c++)
+		rs->bits[c] = bits[c];
+}
+
+
+static int encode(struct rubin_state *rs, long A, long B, int symbol)
+{
+
+	long i0, i1;
+	int ret;
+
+	while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
+		rs->bit_number++;
+		
+		ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
+		if (ret)
+			return ret;
+		rs->q &= LOWER_BITS_RUBIN;
+		rs->q <<= 1;
+		rs->p <<= 1;
+	}
+	i0 = A * rs->p / (A + B);
+	if (i0 <= 0) {
+		i0 = 1;
+	}
+	if (i0 >= rs->p) {
+		i0 = rs->p - 1;
+	}
+	i1 = rs->p - i0;
+
+	if (symbol == 0)
+		rs->p = i0;
+	else {
+		rs->p = i1;
+		rs->q += i0;
+	}
+	return 0;
+}
+
+
+static void end_rubin(struct rubin_state *rs)
+{				
+
+	int i;
+
+	for (i = 0; i < RUBIN_REG_SIZE; i++) {
+		pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1);
+		rs->q &= LOWER_BITS_RUBIN;
+		rs->q <<= 1;
+	}
+}
+
+
+static void init_decode(struct rubin_state *rs, int div, int *bits)
+{
+	init_rubin(rs, div, bits);
+
+	/* except lower */
+	rs->rec_q = 0;
+
+	for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp)))
+		;
+}
+
+static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q)
+{
+	register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN;
+	unsigned long rec_q;
+	int c, bits = 0;
+
+	/*
+	 * First, work out how many bits we need from the input stream.
+	 * Note that we have already done the initial check on this
+	 * loop prior to calling this function.
+	 */
+	do {
+		bits++;
+		q &= lower_bits_rubin;
+		q <<= 1;
+		p <<= 1;
+	} while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN));
+
+	rs->p = p;
+	rs->q = q;
+
+	rs->bit_number += bits;
+
+	/*
+	 * Now get the bits.  We really want this to be "get n bits".
+	 */
+	rec_q = rs->rec_q;
+	do {
+		c = pullbit(&rs->pp);
+		rec_q &= lower_bits_rubin;
+		rec_q <<= 1;
+		rec_q += c;
+	} while (--bits);
+	rs->rec_q = rec_q;
+}
+
+static int decode(struct rubin_state *rs, long A, long B)
+{
+	unsigned long p = rs->p, q = rs->q;
+	long i0, threshold;
+	int symbol;
+
+	if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN))
+		__do_decode(rs, p, q);
+
+	i0 = A * rs->p / (A + B);
+	if (i0 <= 0) {
+		i0 = 1;
+	}
+	if (i0 >= rs->p) {
+		i0 = rs->p - 1;
+	}
+
+	threshold = rs->q + i0;
+	symbol = rs->rec_q >= threshold;
+	if (rs->rec_q >= threshold) {
+		rs->q += i0;
+		i0 = rs->p - i0;
+	}
+
+	rs->p = i0;
+
+	return symbol;
+}
+
+
+
+static int out_byte(struct rubin_state *rs, unsigned char byte)
+{
+	int i, ret;
+	struct rubin_state rs_copy;
+	rs_copy = *rs;
+
+	for (i=0;i<8;i++) {
+		ret = encode(rs, rs->bit_divider-rs->bits[i],rs->bits[i],byte&1);
+		if (ret) {
+			/* Failed. Restore old state */
+			*rs = rs_copy;
+			return ret;
+		}
+		byte=byte>>1;
+	}
+	return 0;
+}
+
+static int in_byte(struct rubin_state *rs)
+{
+	int i, result = 0, bit_divider = rs->bit_divider;
+
+	for (i = 0; i < 8; i++)
+		result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i;
+
+	return result;
+}
+
+
+
+static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, 
+		      unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen)
+{
+	int outpos = 0;
+	int pos=0;
+	struct rubin_state rs;
+
+	init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32);
+
+	init_rubin(&rs, bit_divider, bits);
+	
+	while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos]))
+		pos++;
+	
+	end_rubin(&rs);
+
+	if (outpos > pos) {
+		/* We failed */
+		return -1;
+	}
+	
+	/* Tell the caller how much we managed to compress, 
+	 * and how much space it took */
+	
+	outpos = (pushedbits(&rs.pp)+7)/8;
+	
+	if (outpos >= pos)
+		return -1; /* We didn't actually compress */
+	*sourcelen = pos;
+	*dstlen = outpos;
+	return 0;
+}		   
+#if 0
+/* _compress returns the compressed size, -1 if bigger */
+int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
+{
+	return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
+}
+#endif
+int jffs2_dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
+{
+	int bits[8];
+	unsigned char histo[256];
+	int i;
+	int ret;
+	uint32_t mysrclen, mydstlen;
+
+	mysrclen = *sourcelen;
+	mydstlen = *dstlen - 8;
+
+	if (*dstlen <= 12)
+		return -1;
+
+	memset(histo, 0, 256);
+	for (i=0; i<mysrclen; i++) {
+		histo[data_in[i]]++;
+	}
+	memset(bits, 0, sizeof(int)*8);
+	for (i=0; i<256; i++) {
+		if (i&128)
+			bits[7] += histo[i];
+		if (i&64)
+			bits[6] += histo[i];
+		if (i&32)
+			bits[5] += histo[i];
+		if (i&16)
+			bits[4] += histo[i];
+		if (i&8)
+			bits[3] += histo[i];
+		if (i&4)
+			bits[2] += histo[i];
+		if (i&2)
+			bits[1] += histo[i];
+		if (i&1)
+			bits[0] += histo[i];
+	}
+
+	for (i=0; i<8; i++) {
+		bits[i] = (bits[i] * 256) / mysrclen;
+		if (!bits[i]) bits[i] = 1;
+		if (bits[i] > 255) bits[i] = 255;
+		cpage_out[i] = bits[i];
+	}
+
+	ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen);
+	if (ret) 
+		return ret;
+
+	/* Add back the 8 bytes we took for the probabilities */
+	mydstlen += 8;
+
+	if (mysrclen <= mydstlen) {
+		/* We didn't compress: output is no smaller than the input */
+		return -1;
+	}
+
+	*sourcelen = mysrclen;
+	*dstlen = mydstlen;
+	return 0;
+}
+
+static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, 
+			 unsigned char *page_out, uint32_t srclen, uint32_t destlen)
+{
+	int outpos = 0;
+	struct rubin_state rs;
+	
+	init_pushpull(&rs.pp, cdata_in, srclen, 0, 0);
+	init_decode(&rs, bit_divider, bits);
+	
+	while (outpos < destlen) {
+		page_out[outpos++] = in_byte(&rs);
+	}
+}		   
+
+
+int jffs2_rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t sourcelen, uint32_t dstlen, void *model)
+{
+	rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
+	return 0;
+}
+
+int jffs2_dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t sourcelen, uint32_t dstlen, void *model)
+{
+	int bits[8];
+	int c;
+
+	for (c=0; c<8; c++)
+		bits[c] = data_in[c];
+
+	rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen);
+	return 0;
+}
+
+static struct jffs2_compressor jffs2_rubinmips_comp = {
+    .priority = JFFS2_RUBINMIPS_PRIORITY,
+    .name = "rubinmips",
+    .compr = JFFS2_COMPR_RUBINMIPS,
+    .compress = NULL, /*&jffs2_rubinmips_compress,*/
+    .decompress = &jffs2_rubinmips_decompress,
+#ifdef JFFS2_RUBINMIPS_DISABLED
+    .disabled = 1,
+#else
+    .disabled = 0,
+#endif
+};
+
+int jffs2_rubinmips_init(void)
+{
+    return jffs2_register_compressor(&jffs2_rubinmips_comp);
+}
+
+void jffs2_rubinmips_exit(void)
+{
+    jffs2_unregister_compressor(&jffs2_rubinmips_comp);
+}
+
+static struct jffs2_compressor jffs2_dynrubin_comp = {
+    .priority = JFFS2_DYNRUBIN_PRIORITY,
+    .name = "dynrubin",
+    .compr = JFFS2_COMPR_DYNRUBIN,
+    .compress = jffs2_dynrubin_compress,
+    .decompress = &jffs2_dynrubin_decompress,
+#ifdef JFFS2_DYNRUBIN_DISABLED
+    .disabled = 1,
+#else
+    .disabled = 0,
+#endif
+};
+
+int jffs2_dynrubin_init(void)
+{
+    return jffs2_register_compressor(&jffs2_dynrubin_comp);
+}
+
+void jffs2_dynrubin_exit(void)
+{
+    jffs2_unregister_compressor(&jffs2_dynrubin_comp);
+}
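
jffs2_dynrubin_compress above adapts the Rubin coder to the data: it measures, for each
of the eight bit positions, how often that bit is set in the input, rescales each count
to a probability out of 256 (clamped to 1..255), and stores those eight bytes ahead of
the Rubin-coded payload, where jffs2_dynrubin_decompress reads them back. The sketch
below shows only that model derivation in user space; it is illustrative rather than
kernel code (dynrubin_model and the sample buffer are made up), and it uses unsigned
int counters where the kernel's histo[] is an unsigned char array.

#include <stdio.h>
#include <string.h>

static void dynrubin_model(const unsigned char *data, unsigned int len,
			   int bits[8])
{
	unsigned int histo[256];
	unsigned long set;
	int i, b;

	memset(histo, 0, sizeof(histo));
	for (i = 0; i < (int)len; i++)
		histo[data[i]]++;

	for (b = 0; b < 8; b++) {
		set = 0;
		for (i = 0; i < 256; i++)
			if (i & (1 << b))
				set += histo[i];

		/* Probability that bit b is set, expressed out of 256,
		   clamped so neither symbol ever gets a zero share. */
		bits[b] = (int)((set * 256) / len);
		if (bits[b] < 1)
			bits[b] = 1;
		if (bits[b] > 255)
			bits[b] = 255;
	}
}

int main(void)
{
	const char *sample = "aaaaaaaaaaaabbbbcc";	/* skewed demo input */
	int bits[8], b;

	dynrubin_model((const unsigned char *)sample,
		       (unsigned int)strlen(sample), bits);
	for (b = 0; b < 8; b++)
		printf("bit %d: set with probability %d/256\n", b, bits[b]);
	return 0;
}
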
diff --git a/fs/jffs2/compr_rubin.h b/fs/jffs2/compr_rubin.h
new file mode 100644
index 000000000000..cf51e34f6574
--- /dev/null
+++ b/fs/jffs2/compr_rubin.h
@@ -0,0 +1,21 @@
+/* Rubin encoder/decoder header       */
+/* work started at   : aug   3, 1994  */
+/* last modification : aug  15, 1994  */
+/* $Id: compr_rubin.h,v 1.6 2002/01/25 01:49:26 dwmw2 Exp $ */
+
+#include "pushpull.h"
+
+#define RUBIN_REG_SIZE   16
+#define UPPER_BIT_RUBIN    (((long) 1)<<(RUBIN_REG_SIZE-1))
+#define LOWER_BITS_RUBIN   ((((long) 1)<<(RUBIN_REG_SIZE-1))-1)
+
+
+struct rubin_state {
+	unsigned long p;		
+	unsigned long q;	
+	unsigned long rec_q;
+	long bit_number;
+	struct pushpull pp;
+	int bit_divider;
+	int bits[8];
+};
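
With RUBIN_REG_SIZE of 16, UPPER_BIT_RUBIN is 0x8000 and LOWER_BITS_RUBIN is 0x7fff:
encode() and decode() shift the (p, q) interval left, emitting or consuming one bit,
whenever the whole interval sits inside one half of the 16-bit register. The short
user-space sketch below works a single interval split with the same arithmetic as
encode(); the A and B values are made-up demo probabilities, not anything taken from
the kernel.

#include <stdio.h>

#define RUBIN_REG_SIZE   16
#define UPPER_BIT_RUBIN  (((long)1) << (RUBIN_REG_SIZE - 1))

int main(void)
{
	long p = 2 * UPPER_BIT_RUBIN;	/* initial range, as in init_rubin() */
	long q = 0;
	long A = 256 - 87, B = 87;	/* e.g. bit_divider 256, bits[i] 87 */
	long i0 = A * p / (A + B);	/* share of the range given to a 0 bit */

	printf("UPPER_BIT_RUBIN=0x%lx LOWER_BITS_RUBIN=0x%lx\n",
	       (unsigned long)UPPER_BIT_RUBIN,
	       (unsigned long)(UPPER_BIT_RUBIN - 1));
	printf("encoding 0: p=%ld q=%ld   encoding 1: p=%ld q=%ld\n",
	       i0, q, p - i0, q + i0);
	return 0;
}
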
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
new file mode 100644
index 000000000000..9f9932c22adb
--- /dev/null
+++ b/fs/jffs2/compr_zlib.c
@@ -0,0 +1,218 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: compr_zlib.c,v 1.29 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#if !defined(__KERNEL__) && !defined(__ECOS)
+#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
+#endif
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+#include <linux/zutil.h>
+#include <asm/semaphore.h>
+#include "nodelist.h"
+#include "compr.h"
+
+	/* Plan: call deflate() repeatedly with Z_PARTIAL_FLUSH while
+	   input remains and the output written so far stays below
+	   *dstlen - STREAM_END_SPACE, then call it once more with
+	   avail_in == 0 and the reserved 12 bytes of output space and
+	   flush == Z_FINISH so it can emit the stream trailer.
+	   Q: Is 12 bytes sufficient?
+	*/
+#define STREAM_END_SPACE 12
+
+static DECLARE_MUTEX(deflate_sem);
+static DECLARE_MUTEX(inflate_sem);
+static z_stream inf_strm, def_strm;
+
+#ifdef __KERNEL__ /* Linux-only */
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+static int __init alloc_workspaces(void)
+{
+	def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+	if (!def_strm.workspace) {
+		printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize());
+		return -ENOMEM;
+	}
+	D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize()));
+	inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
+	if (!inf_strm.workspace) {
+		printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
+		vfree(def_strm.workspace);
+		return -ENOMEM;
+	}
+	D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize()));
+	return 0;
+}
+
+static void free_workspaces(void)
+{
+	vfree(def_strm.workspace);
+	vfree(inf_strm.workspace);
+}
+#else
+#define alloc_workspaces() (0)
+#define free_workspaces() do { } while(0)
+#endif /* __KERNEL__ */
+
+int jffs2_zlib_compress(unsigned char *data_in, unsigned char *cpage_out, 
+		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
+{
+	int ret;
+
+	if (*dstlen <= STREAM_END_SPACE)
+		return -1;
+
+	down(&deflate_sem);
+
+	if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
+		printk(KERN_WARNING "deflateInit failed\n");
+		up(&deflate_sem);
+		return -1;
+	}
+
+	def_strm.next_in = data_in;
+	def_strm.total_in = 0;
+	
+	def_strm.next_out = cpage_out;
+	def_strm.total_out = 0;
+
+	while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
+		def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
+		def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
+		D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
+			  def_strm.avail_in, def_strm.avail_out));
+		ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
+		D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", 
+			  def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out));
+		if (ret != Z_OK) {
+			D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
+			zlib_deflateEnd(&def_strm);
+			up(&deflate_sem);
+			return -1;
+		}
+	}
+	def_strm.avail_out += STREAM_END_SPACE;
+	def_strm.avail_in = 0;
+	ret = zlib_deflate(&def_strm, Z_FINISH);
+	zlib_deflateEnd(&def_strm);
+
+	if (ret != Z_STREAM_END) {
+		D1(printk(KERN_DEBUG "final deflate returned %d\n", ret));
+		ret = -1;
+		goto out;
+	}
+
+	if (def_strm.total_out >= def_strm.total_in) {
+		D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n",
+			  def_strm.total_in, def_strm.total_out));
+		ret = -1;
+		goto out;
+	}
+
+	D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n",
+		  def_strm.total_in, def_strm.total_out));
+
+	*dstlen = def_strm.total_out;
+	*sourcelen = def_strm.total_in;
+	ret = 0;
+ out:
+	up(&deflate_sem);
+	return ret;
+}
+
+int jffs2_zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
+		      uint32_t srclen, uint32_t destlen, void *model)
+{
+	int ret;
+	int wbits = MAX_WBITS;
+
+	down(&inflate_sem);
+
+	inf_strm.next_in = data_in;
+	inf_strm.avail_in = srclen;
+	inf_strm.total_in = 0;
+	
+	inf_strm.next_out = cpage_out;
+	inf_strm.avail_out = destlen;
+	inf_strm.total_out = 0;
+
+	/* If it's deflate, and it's got no preset dictionary, then
+	   we can tell zlib to skip the adler32 check. */
+	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
+	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
+	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
+
+		D2(printk(KERN_DEBUG "inflate skipping adler32\n"));
+		wbits = -((data_in[0] >> 4) + 8);
+		inf_strm.next_in += 2;
+		inf_strm.avail_in -= 2;
+	} else {
+		/* Let this remain D1 for now -- it should never happen */
+		D1(printk(KERN_DEBUG "inflate not skipping adler32\n"));
+	}
+
+
+	if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
+		printk(KERN_WARNING "inflateInit failed\n");
+		up(&inflate_sem);
+		return 1;
+	}
+
+	while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
+		;
+	if (ret != Z_STREAM_END) {
+		printk(KERN_NOTICE "inflate returned %d\n", ret);
+	}
+	zlib_inflateEnd(&inf_strm);
+	up(&inflate_sem);
+	return 0;
+}
+
+static struct jffs2_compressor jffs2_zlib_comp = {
+    .priority = JFFS2_ZLIB_PRIORITY,
+    .name = "zlib",
+    .compr = JFFS2_COMPR_ZLIB,
+    .compress = &jffs2_zlib_compress,
+    .decompress = &jffs2_zlib_decompress,
+#ifdef JFFS2_ZLIB_DISABLED
+    .disabled = 1,
+#else
+    .disabled = 0,
+#endif
+};
+
+int __init jffs2_zlib_init(void)
+{
+    int ret;
+
+    ret = alloc_workspaces();
+    if (ret)
+        return ret;
+
+    ret = jffs2_register_compressor(&jffs2_zlib_comp);
+    if (ret)
+        free_workspaces();
+
+    return ret;
+}
+
+void jffs2_zlib_exit(void)
+{
+    jffs2_unregister_compressor(&jffs2_zlib_comp);
+    free_workspaces();
+}
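
The zlib wrapper above follows the convention the JFFS2 compressors here share: write
into a bounded output buffer and return failure unless the result is strictly smaller
than the input, so the caller can fall back to storing the page uncompressed. Below is
a minimal user-space sketch of that contract using the ordinary zlib convenience API
(compress2/uncompress, link with -lz) rather than the kernel's zlib_deflate work-space
interface; the name try_compress and the buffer sizes are illustrative assumptions.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

static int try_compress(const unsigned char *in, uLong inlen,
			unsigned char *out, uLong *outlen)
{
	/* Level 3, the same level the kernel passes to zlib_deflateInit(). */
	if (compress2(out, outlen, in, inlen, 3) != Z_OK)
		return -1;
	if (*outlen >= inlen)
		return -1;	/* expanded or broke even: caller stores it raw */
	return 0;
}

int main(void)
{
	unsigned char src[4096], comp[8192], back[4096];
	uLong clen = sizeof(comp), dlen = sizeof(back);

	memset(src, 'A', sizeof(src));	/* a trivially compressible "page" */
	if (try_compress(src, sizeof(src), comp, &clen))
		return 1;
	printf("compressed %u -> %lu bytes\n", (unsigned)sizeof(src), clen);

	if (uncompress(back, &dlen, comp, clen) != Z_OK ||
	    dlen != sizeof(src) || memcmp(back, src, dlen))
		return 1;
	printf("round trip ok\n");
	return 0;
}
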
diff --git a/fs/jffs2/comprtest.c b/fs/jffs2/comprtest.c
new file mode 100644
index 000000000000..cf51f091d0e7
--- /dev/null
+++ b/fs/jffs2/comprtest.c
@@ -0,0 +1,307 @@
+/* $Id: comprtest.c,v 1.5 2002/01/03 15:20:44 dwmw2 Exp $ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/types.h>
+#if 0
+#define TESTDATA_LEN 512
+static unsigned char testdata[TESTDATA_LEN] = {
+ 0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x83, 0x04, 0x08, 0x34, 0x00, 0x00, 0x00,
+ 0xb0, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x20, 0x00, 0x06, 0x00, 0x28, 0x00,
+ 0x1e, 0x00, 0x1b, 0x00, 0x06, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x34, 0x80, 0x04, 0x08,
+ 0x34, 0x80, 0x04, 0x08, 0xc0, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xf4, 0x00, 0x00, 0x00, 0xf4, 0x80, 0x04, 0x08,
+ 0xf4, 0x80, 0x04, 0x08, 0x13, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x04, 0x08,
+ 0x00, 0x80, 0x04, 0x08, 0x0d, 0x05, 0x00, 0x00, 0x0d, 0x05, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x05, 0x00, 0x00, 0x10, 0x95, 0x04, 0x08,
+ 0x10, 0x95, 0x04, 0x08, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x58, 0x05, 0x00, 0x00, 0x58, 0x95, 0x04, 0x08,
+ 0x58, 0x95, 0x04, 0x08, 0xa0, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x08, 0x81, 0x04, 0x08,
+ 0x08, 0x81, 0x04, 0x08, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x6c, 0x64, 0x2d, 0x6c, 0x69, 0x6e, 0x75,
+ 0x78, 0x2e, 0x73, 0x6f, 0x2e, 0x32, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00,
+ 0x0c, 0x83, 0x04, 0x08, 0x81, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x1c, 0x83, 0x04, 0x08, 0xac, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00,
+ 0x2c, 0x83, 0x04, 0x08, 0xdd, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00,
+ 0x3c, 0x83, 0x04, 0x08, 0x2e, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x4c, 0x83, 0x04, 0x08, 0x7d, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00,
+ 0x00, 0x85, 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x5f, 0x67,
+ 0x6d, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x5f, 0x00, 0x6c, 0x69, 0x62, 0x63,
+ 0x2e, 0x73, 0x6f, 0x2e, 0x36, 0x00, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x00, 0x5f, 0x5f, 0x63};
+#else
+#define TESTDATA_LEN 3481
+static unsigned char testdata[TESTDATA_LEN] = {
+ 0x23, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x22, 0x64, 0x62, 0x65, 0x6e, 0x63, 0x68,
+ 0x2e, 0x68, 0x22, 0x0a, 0x0a, 0x23, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x4d, 0x41, 0x58,
+ 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x20, 0x31, 0x30, 0x30, 0x30, 0x0a, 0x0a, 0x73, 0x74, 0x61,
+ 0x74, 0x69, 0x63, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x62, 0x75, 0x66, 0x5b, 0x37, 0x30, 0x30,
+ 0x30, 0x30, 0x5d, 0x3b, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x20, 0x69, 0x6e, 0x74, 0x20,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x73, 0x74, 0x61,
+ 0x74, 0x69, 0x63, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x7b, 0x0a, 0x09, 0x69, 0x6e,
+ 0x74, 0x20, 0x66, 0x64, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x3b, 0x0a, 0x7d, 0x20, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x4d, 0x41, 0x58, 0x5f,
+ 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5d, 0x3b, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f,
+ 0x5f, 0x75, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e,
+ 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72,
+ 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x75,
+ 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x21, 0x3d, 0x20,
+ 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28,
+ 0x25, 0x64, 0x29, 0x20, 0x75, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x20, 0x25, 0x73, 0x20, 0x66, 0x61,
+ 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09,
+ 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a,
+ 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x66,
+ 0x69, 0x6c, 0x65, 0x28, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x64, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20,
+ 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x3b, 0x0a,
+ 0x09, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a,
+ 0x09, 0x09, 0x73, 0x20, 0x3d, 0x20, 0x4d, 0x49, 0x4e, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x6f, 0x66,
+ 0x28, 0x62, 0x75, 0x66, 0x29, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x09,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73,
+ 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x2d, 0x3d, 0x20, 0x73, 0x3b, 0x0a,
+ 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20,
+ 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20,
+ 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x64, 0x2c,
+ 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x20, 0x3d,
+ 0x20, 0x4f, 0x5f, 0x52, 0x44, 0x57, 0x52, 0x7c, 0x4f, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x3b,
+ 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x73, 0x74, 0x61, 0x74, 0x20, 0x73, 0x74,
+ 0x3b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28,
+ 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69,
+ 0x7a, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x20, 0x7c,
+ 0x3d, 0x20, 0x4f, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x3b, 0x0a, 0x0a, 0x09, 0x66, 0x64, 0x20,
+ 0x3d, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x2c, 0x20, 0x30, 0x36, 0x30, 0x30, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20,
+ 0x28, 0x66, 0x64, 0x20, 0x3d, 0x3d, 0x20, 0x2d, 0x31, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70,
+ 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x6f, 0x70, 0x65, 0x6e,
+ 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22,
+ 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28,
+ 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x66, 0x73, 0x74, 0x61, 0x74, 0x28, 0x66, 0x64, 0x2c,
+ 0x20, 0x26, 0x73, 0x74, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65,
+ 0x20, 0x3e, 0x20, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b,
+ 0x0a, 0x23, 0x69, 0x66, 0x20, 0x44, 0x45, 0x42, 0x55, 0x47, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69,
+ 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64,
+ 0x69, 0x6e, 0x67, 0x20, 0x25, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x25, 0x64, 0x20, 0x66, 0x72, 0x6f,
+ 0x6d, 0x20, 0x25, 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66,
+ 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, 0x74,
+ 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x23, 0x65,
+ 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x09, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69,
+ 0x6c, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x74,
+ 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x20, 0x65, 0x6c,
+ 0x73, 0x65, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x3c, 0x20, 0x73, 0x74,
+ 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72,
+ 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6e, 0x67,
+ 0x20, 0x25, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x25, 0x64, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x25,
+ 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e,
+ 0x74, 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09,
+ 0x09, 0x66, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x73,
+ 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69,
+ 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, 0x69,
+ 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, 0x20,
+ 0x30, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66,
+ 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53,
+ 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x66, 0x69,
+ 0x6c, 0x65, 0x20, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x66, 0x75, 0x6c, 0x6c, 0x20, 0x66, 0x6f,
+ 0x72, 0x20, 0x25, 0x73, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b,
+ 0x0a, 0x09, 0x09, 0x65, 0x78, 0x69, 0x74, 0x28, 0x31, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09,
+ 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65,
+ 0x20, 0x3d, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x3b, 0x0a, 0x09, 0x66, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x64, 0x3b, 0x0a, 0x09,
+ 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2b, 0x2b, 0x20, 0x25, 0x20, 0x31, 0x30,
+ 0x30, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e,
+ 0x74, 0x66, 0x28, 0x22, 0x2e, 0x22, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76,
+ 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x69, 0x6e, 0x74,
+ 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x7a,
+ 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x0a, 0x7b,
+ 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x62,
+ 0x75, 0x66, 0x5b, 0x30, 0x5d, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x6d, 0x65, 0x6d, 0x73,
+ 0x65, 0x74, 0x28, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x31, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x6f,
+ 0x66, 0x28, 0x62, 0x75, 0x66, 0x29, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28,
+ 0x69, 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b,
+ 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d,
+ 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a,
+ 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58,
+ 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x31, 0x0a,
+ 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64,
+ 0x6f, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20,
+ 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x20,
+ 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, 0x64, 0x20, 0x6f, 0x66, 0x73, 0x3d, 0x25, 0x64, 0x5c, 0x6e,
+ 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e,
+ 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c,
+ 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x3b, 0x0a,
+ 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b,
+ 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x6c, 0x73, 0x65, 0x65, 0x6b, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x2c,
+ 0x20, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x53, 0x45, 0x54, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20,
+ 0x28, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d,
+ 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20,
+ 0x21, 0x3d, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69,
+ 0x6e, 0x74, 0x66, 0x28, 0x22, 0x77, 0x72, 0x69, 0x74, 0x65, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65,
+ 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x5c, 0x6e,
+ 0x22, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d,
+ 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x28, 0x69,
+ 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x73,
+ 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29,
+ 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20,
+ 0x28, 0x69, 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53,
+ 0x3b, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d,
+ 0x3d, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b,
+ 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41,
+ 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69,
+ 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x65, 0x61,
+ 0x64, 0x3a, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x77, 0x61, 0x73,
+ 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25,
+ 0x64, 0x20, 0x6f, 0x66, 0x73, 0x3d, 0x25, 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c,
+ 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75,
+ 0x72, 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x6c, 0x73, 0x65, 0x65, 0x6b, 0x28, 0x66, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73,
+ 0x65, 0x74, 0x2c, 0x20, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x53, 0x45, 0x54, 0x29, 0x3b, 0x0a, 0x09,
+ 0x72, 0x65, 0x61, 0x64, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66,
+ 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x7d,
+ 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28,
+ 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69,
+ 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x3d, 0x30, 0x3b,
+ 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, 0x69, 0x2b, 0x2b, 0x29,
+ 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b,
+ 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x68, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09,
+ 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c,
+ 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22,
+ 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x3a, 0x20, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74,
+ 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20,
+ 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28, 0x66, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x29, 0x3b, 0x0a, 0x09, 0x66, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x20,
+ 0x30, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x6d, 0x6b,
+ 0x64, 0x69, 0x72, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29,
+ 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61,
+ 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x6b, 0x64, 0x69, 0x72,
+ 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x30, 0x37, 0x30, 0x30, 0x29, 0x20, 0x21, 0x3d,
+ 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x44, 0x45, 0x42, 0x55, 0x47, 0x0a,
+ 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x6d, 0x6b, 0x64, 0x69, 0x72, 0x20,
+ 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e,
+ 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6e, 0x61,
+ 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72,
+ 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x7d, 0x0a,
+ 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x6d, 0x64, 0x69, 0x72,
+ 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x7b, 0x0a,
+ 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29,
+ 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x72, 0x6d, 0x64, 0x69, 0x72, 0x28, 0x66, 0x6e,
+ 0x61, 0x6d, 0x65, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70,
+ 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x72, 0x6d, 0x64, 0x69, 0x72, 0x20, 0x25, 0x73, 0x20,
+ 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20,
+ 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c,
+ 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29,
+ 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f,
+ 0x5f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x6f, 0x6c,
+ 0x64, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x6e, 0x65, 0x77, 0x29, 0x0a, 0x7b, 0x0a,
+ 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x6f, 0x6c, 0x64, 0x29, 0x3b, 0x0a,
+ 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x6e, 0x65, 0x77, 0x29, 0x3b, 0x0a,
+ 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x28, 0x6f, 0x6c, 0x64,
+ 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09,
+ 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x20,
+ 0x25, 0x73, 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73,
+ 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6f, 0x6c, 0x64, 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d,
+ 0x0a, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x28,
+ 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74,
+ 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x20, 0x73, 0x74, 0x61, 0x74, 0x20, 0x73, 0x74, 0x3b, 0x0a, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75,
+ 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69,
+ 0x66, 0x20, 0x28, 0x73, 0x74, 0x61, 0x74, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x26,
+ 0x73, 0x74, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72,
+ 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x3a, 0x20, 0x25, 0x73, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, 0x64, 0x20, 0x25,
+ 0x73, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d,
+ 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x53, 0x5f, 0x49,
+ 0x53, 0x44, 0x49, 0x52, 0x28, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x29,
+ 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28,
+ 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x21, 0x3d, 0x20, 0x73, 0x69,
+ 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22,
+ 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x3a, 0x20, 0x25, 0x73,
+ 0x20, 0x77, 0x72, 0x6f, 0x6e, 0x67, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x25, 0x64, 0x20, 0x25,
+ 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d,
+ 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, 0x74, 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a,
+ 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x28,
+ 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74,
+ 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x64, 0x6f, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x35, 0x30, 0x30, 0x30, 0x2c, 0x20, 0x73,
+ 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28,
+ 0x35, 0x30, 0x30, 0x30, 0x29, 0x3b, 0x0a, 0x7d, 0x0a
+};
+#endif
+static unsigned char comprbuf[TESTDATA_LEN];
+static unsigned char decomprbuf[TESTDATA_LEN];
+
+int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in, 
+		     unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
+unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out, 
+			     uint32_t *datalen, uint32_t *cdatalen);
+
+int init_module(void) {
+	unsigned char comprtype;
+	uint32_t c, d;
+	int ret;
+
+	printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+	       testdata[0],testdata[1],testdata[2],testdata[3], 
+	       testdata[4],testdata[5],testdata[6],testdata[7], 
+	       testdata[8],testdata[9],testdata[10],testdata[11], 
+	       testdata[12],testdata[13],testdata[14],testdata[15]); 
+	d = TESTDATA_LEN;
+	c = TESTDATA_LEN;
+	comprtype = jffs2_compress(testdata, comprbuf, &d, &c);
+
+	printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n",
+	       comprtype, c, d);
+	printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+	       comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3], 
+	       comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7], 
+	       comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11], 
+	       comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]); 
+
+	ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d);
+	printk("jffs2_decompress returned %d\n", ret);
+	printk("Decompressed data:  %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+	       decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3], 
+	       decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7], 
+	       decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11], 
+	       decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]); 
+	if (memcmp(decomprbuf, testdata, d))
+		printk("Compression and decompression corrupted data\n");
+	else
+		printk("Compression good for %d bytes\n", d);
+	return 1;
+}
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
new file mode 100644
index 000000000000..757306fa3ff4
--- /dev/null
+++ b/fs/jffs2/dir.c
@@ -0,0 +1,799 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: dir.c,v 1.84 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include <linux/jffs2_fs_i.h>
+#include <linux/jffs2_fs_sb.h>
+#include <linux/time.h>
+#include "nodelist.h"
+
+/* Urgh. Please tell me there's a nicer way of doing these. */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,48)
+typedef int mknod_arg_t;
+#define NAMEI_COMPAT(x) ((void *)x)
+#else
+typedef dev_t mknod_arg_t;
+#define NAMEI_COMPAT(x) (x)
+#endif
+
+static int jffs2_readdir (struct file *, void *, filldir_t);
+
+static int jffs2_create (struct inode *,struct dentry *,int,
+			 struct nameidata *);
+static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
+				    struct nameidata *);
+static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
+static int jffs2_unlink (struct inode *,struct dentry *);
+static int jffs2_symlink (struct inode *,struct dentry *,const char *);
+static int jffs2_mkdir (struct inode *,struct dentry *,int);
+static int jffs2_rmdir (struct inode *,struct dentry *);
+static int jffs2_mknod (struct inode *,struct dentry *,int,mknod_arg_t);
+static int jffs2_rename (struct inode *, struct dentry *,
+                        struct inode *, struct dentry *);
+
+struct file_operations jffs2_dir_operations =
+{
+	.read =		generic_read_dir,
+	.readdir =	jffs2_readdir,
+	.ioctl =	jffs2_ioctl,
+	.fsync =	jffs2_fsync
+};
+
+
+struct inode_operations jffs2_dir_inode_operations =
+{
+	.create =	NAMEI_COMPAT(jffs2_create),
+	.lookup =	NAMEI_COMPAT(jffs2_lookup),
+	.link =		jffs2_link,
+	.unlink =	jffs2_unlink,
+	.symlink =	jffs2_symlink,
+	.mkdir =	jffs2_mkdir,
+	.rmdir =	jffs2_rmdir,
+	.mknod =	jffs2_mknod,
+	.rename =	jffs2_rename,
+	.setattr =	jffs2_setattr,
+};
+
+/***********************************************************************/
+
+
+/* We keep the dirent list sorted in increasing order of name hash,
+   and we use the same hash function as the dentries. Makes this 
+   nice and simple
+*/
+static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
+				   struct nameidata *nd)
+{
+	struct jffs2_inode_info *dir_f;
+	struct jffs2_sb_info *c;
+	struct jffs2_full_dirent *fd = NULL, *fd_list;
+	uint32_t ino = 0;
+	struct inode *inode = NULL;
+
+	D1(printk(KERN_DEBUG "jffs2_lookup()\n"));
+
+	dir_f = JFFS2_INODE_INFO(dir_i);
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+
+	down(&dir_f->sem);
+
+	/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
+	for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
+		if (fd_list->nhash == target->d_name.hash && 
+		    (!fd || fd_list->version > fd->version) &&
+		    strlen(fd_list->name) == target->d_name.len &&
+		    !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) {
+			fd = fd_list;
+		}
+	}
+	if (fd)
+		ino = fd->ino;
+	up(&dir_f->sem);
+	if (ino) {
+		inode = iget(dir_i->i_sb, ino);
+		if (!inode) {
+			printk(KERN_WARNING "iget() failed for ino #%u\n", ino);
+			return (ERR_PTR(-EIO));
+		}
+	}
+
+	d_add(target, inode);
+
+	return NULL;
+}
+
+/***********************************************************************/
+
+
+static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct jffs2_inode_info *f;
+	struct jffs2_sb_info *c;
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct jffs2_full_dirent *fd;
+	unsigned long offset, curofs;
+
+	D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_dentry->d_inode->i_ino));
+
+	f = JFFS2_INODE_INFO(inode);
+	c = JFFS2_SB_INFO(inode->i_sb);
+
+	offset = filp->f_pos;
+
+	if (offset == 0) {
+		D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino));
+		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+			goto out;
+		offset++;
+	}
+	if (offset == 1) {
+		unsigned long pino = parent_ino(filp->f_dentry);
+		D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino));
+		if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
+			goto out;
+		offset++;
+	}
+
+	curofs=1;
+	down(&f->sem);
+	for (fd = f->dents; fd; fd = fd->next) {
+
+		curofs++;
+		/* First loop: curofs = 2; offset = 2 */
+		if (curofs < offset) {
+			D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", 
+				  fd->name, fd->ino, fd->type, curofs, offset));
+			continue;
+		}
+		if (!fd->ino) {
+			D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name));
+			offset++;
+			continue;
+		}
+		D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type));
+		if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0)
+			break;
+		offset++;
+	}
+	up(&f->sem);
+ out:
+	filp->f_pos = offset;
+	return 0;
+}
+
+/***********************************************************************/
+
+
+static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
+			struct nameidata *nd)
+{
+	struct jffs2_raw_inode *ri;
+	struct jffs2_inode_info *f, *dir_f;
+	struct jffs2_sb_info *c;
+	struct inode *inode;
+	int ret;
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri)
+		return -ENOMEM;
+	
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+
+	D1(printk(KERN_DEBUG "jffs2_create()\n"));
+
+	inode = jffs2_new_inode(dir_i, mode, ri);
+
+	if (IS_ERR(inode)) {
+		D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
+		jffs2_free_raw_inode(ri);
+		return PTR_ERR(inode);
+	}
+
+	inode->i_op = &jffs2_file_inode_operations;
+	inode->i_fop = &jffs2_file_operations;
+	inode->i_mapping->a_ops = &jffs2_file_address_operations;
+	inode->i_mapping->nrpages = 0;
+
+	f = JFFS2_INODE_INFO(inode);
+	dir_f = JFFS2_INODE_INFO(dir_i);
+
+	ret = jffs2_do_create(c, dir_f, f, ri, 
+			      dentry->d_name.name, dentry->d_name.len);
+
+	if (ret) {
+		make_bad_inode(inode);
+		iput(inode);
+		jffs2_free_raw_inode(ri);
+		return ret;
+	}
+
+	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+
+	jffs2_free_raw_inode(ri);
+	d_instantiate(dentry, inode);
+
+	D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
+		  inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages));
+	return 0;
+}
+
+/***********************************************************************/
+
+
+static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb);
+	struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
+	struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode);
+	int ret;
+
+	ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, 
+			       dentry->d_name.len, dead_f);
+	if (dead_f->inocache)
+		dentry->d_inode->i_nlink = dead_f->inocache->nlink;
+	return ret;
+}
+/***********************************************************************/
+
+
+static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_inode->i_sb);
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
+	struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
+	int ret;
+	uint8_t type;
+
+	/* Don't let people make hard links to bad inodes. */
+	if (!f->inocache)
+		return -EIO;
+
+	if (S_ISDIR(old_dentry->d_inode->i_mode))
+		return -EPERM;
+
+	/* XXX: This is ugly */
+	type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
+	if (!type) type = DT_REG;
+
+	ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len);
+
+	if (!ret) {
+		down(&f->sem);
+		old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
+		up(&f->sem);
+		d_instantiate(dentry, old_dentry->d_inode);
+		atomic_inc(&old_dentry->d_inode->i_count);
+	}
+	return ret;
+}
+
+/***********************************************************************/
+
+static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char *target)
+{
+	struct jffs2_inode_info *f, *dir_f;
+	struct jffs2_sb_info *c;
+	struct inode *inode;
+	struct jffs2_raw_inode *ri;
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dnode *fn;
+	struct jffs2_full_dirent *fd;
+	int namelen;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	/* FIXME: If you care. We'd need to use frags for the target
+	   if it grows much more than this */
+	if (strlen(target) > 254)
+		return -EINVAL;
+
+	ri = jffs2_alloc_raw_inode();
+
+	if (!ri)
+		return -ENOMEM;
+	
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+	
+	/* Try to reserve enough space for both node and dirent. 
+	 * Just the node will do for now, though 
+	 */
+	namelen = dentry->d_name.len;
+	ret = jffs2_reserve_space(c, sizeof(*ri) + strlen(target), &phys_ofs, &alloclen, ALLOC_NORMAL);
+
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		return ret;
+	}
+
+	inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri);
+
+	if (IS_ERR(inode)) {
+		jffs2_free_raw_inode(ri);
+		jffs2_complete_reservation(c);
+		return PTR_ERR(inode);
+	}
+
+	inode->i_op = &jffs2_symlink_inode_operations;
+
+	f = JFFS2_INODE_INFO(inode);
+
+	inode->i_size = strlen(target);
+	ri->isize = ri->dsize = ri->csize = cpu_to_je32(inode->i_size);
+	ri->totlen = cpu_to_je32(sizeof(*ri) + inode->i_size);
+	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+	ri->compr = JFFS2_COMPR_NONE;
+	ri->data_crc = cpu_to_je32(crc32(0, target, strlen(target)));
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+	
+	fn = jffs2_write_dnode(c, f, ri, target, strlen(target), phys_ofs, ALLOC_NORMAL);
+
+	jffs2_free_raw_inode(ri);
+
+	if (IS_ERR(fn)) {
+		/* Eeek. Wave bye bye */
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fn);
+	}
+	/* No data here. Only a metadata node, which will be 
+	   obsoleted by the first data write
+	*/
+	f->metadata = fn;
+	up(&f->sem);
+
+	jffs2_complete_reservation(c);
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		/* Eep. */
+		jffs2_clear_inode(inode);
+		return ret;
+	}
+
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd) {
+		/* Argh. Now we treat it like a normal delete */
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return -ENOMEM;
+	}
+
+	dir_f = JFFS2_INODE_INFO(dir_i);
+	down(&dir_f->sem);
+
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_i->i_ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = cpu_to_je32(inode->i_ino);
+	rd->mctime = cpu_to_je32(get_seconds());
+	rd->nsize = namelen;
+	rd->type = DT_LNK;
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+
+	if (IS_ERR(fd)) {
+		/* dirent failed to write. Delete the inode normally 
+		   as if it were the final unlink() */
+		jffs2_complete_reservation(c);
+		jffs2_free_raw_dirent(rd);
+		up(&dir_f->sem);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fd);
+	}
+
+	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+
+	jffs2_free_raw_dirent(rd);
+
+	/* Link the fd into the inode's list, obsoleting an old
+	   one if necessary. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	up(&dir_f->sem);
+	jffs2_complete_reservation(c);
+
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+
+static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
+{
+	struct jffs2_inode_info *f, *dir_f;
+	struct jffs2_sb_info *c;
+	struct inode *inode;
+	struct jffs2_raw_inode *ri;
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dnode *fn;
+	struct jffs2_full_dirent *fd;
+	int namelen;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	mode |= S_IFDIR;
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri)
+		return -ENOMEM;
+	
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+
+	/* Try to reserve enough space for both node and dirent. 
+	 * Just the node will do for now, though 
+	 */
+	namelen = dentry->d_name.len;
+	ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
+
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		return ret;
+	}
+
+	inode = jffs2_new_inode(dir_i, mode, ri);
+
+	if (IS_ERR(inode)) {
+		jffs2_free_raw_inode(ri);
+		jffs2_complete_reservation(c);
+		return PTR_ERR(inode);
+	}
+
+	inode->i_op = &jffs2_dir_inode_operations;
+	inode->i_fop = &jffs2_dir_operations;
+	/* Directories get nlink 2 at start */
+	inode->i_nlink = 2;
+
+	f = JFFS2_INODE_INFO(inode);
+
+	ri->data_crc = cpu_to_je32(0);
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+	
+	fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+
+	jffs2_free_raw_inode(ri);
+
+	if (IS_ERR(fn)) {
+		/* Eeek. Wave bye bye */
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fn);
+	}
+	/* No data here. Only a metadata node, which will be 
+	   obsoleted by the first data write
+	*/
+	f->metadata = fn;
+	up(&f->sem);
+
+	jffs2_complete_reservation(c);
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		/* Eep. */
+		jffs2_clear_inode(inode);
+		return ret;
+	}
+	
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd) {
+		/* Argh. Now we treat it like a normal delete */
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return -ENOMEM;
+	}
+
+	dir_f = JFFS2_INODE_INFO(dir_i);
+	down(&dir_f->sem);
+
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_i->i_ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = cpu_to_je32(inode->i_ino);
+	rd->mctime = cpu_to_je32(get_seconds());
+	rd->nsize = namelen;
+	rd->type = DT_DIR;
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+	
+	if (IS_ERR(fd)) {
+		/* dirent failed to write. Delete the inode normally 
+		   as if it were the final unlink() */
+		jffs2_complete_reservation(c);
+		jffs2_free_raw_dirent(rd);
+		up(&dir_f->sem);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fd);
+	}
+
+	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+	dir_i->i_nlink++;
+
+	jffs2_free_raw_dirent(rd);
+
+	/* Link the fd into the inode's list, obsoleting an old
+	   one if necessary. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	up(&dir_f->sem);
+	jffs2_complete_reservation(c);
+
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
+{
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
+	struct jffs2_full_dirent *fd;
+	int ret;
+
+	for (fd = f->dents ; fd; fd = fd->next) {
+		if (fd->ino)
+			return -ENOTEMPTY;
+	}
+	ret = jffs2_unlink(dir_i, dentry);
+	if (!ret)
+		dir_i->i_nlink--;
+	return ret;
+}
+
+static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, mknod_arg_t rdev)
+{
+	struct jffs2_inode_info *f, *dir_f;
+	struct jffs2_sb_info *c;
+	struct inode *inode;
+	struct jffs2_raw_inode *ri;
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dnode *fn;
+	struct jffs2_full_dirent *fd;
+	int namelen;
+	jint16_t dev;
+	int devlen = 0;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	if (!old_valid_dev(rdev))
+		return -EINVAL;
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri)
+		return -ENOMEM;
+	
+	c = JFFS2_SB_INFO(dir_i->i_sb);
+	
+	if (S_ISBLK(mode) || S_ISCHR(mode)) {
+		dev = cpu_to_je16(old_encode_dev(rdev));
+		devlen = sizeof(dev);
+	}
+	
+	/* Try to reserve enough space for both node and dirent. 
+	 * Just the node will do for now, though 
+	 */
+	namelen = dentry->d_name.len;
+	ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		return ret;
+	}
+
+	inode = jffs2_new_inode(dir_i, mode, ri);
+
+	if (IS_ERR(inode)) {
+		jffs2_free_raw_inode(ri);
+		jffs2_complete_reservation(c);
+		return PTR_ERR(inode);
+	}
+	inode->i_op = &jffs2_file_inode_operations;
+	init_special_inode(inode, inode->i_mode, rdev);
+
+	f = JFFS2_INODE_INFO(inode);
+
+	ri->dsize = ri->csize = cpu_to_je32(devlen);
+	ri->totlen = cpu_to_je32(sizeof(*ri) + devlen);
+	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+	ri->compr = JFFS2_COMPR_NONE;
+	ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen));
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+	
+	fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL);
+
+	jffs2_free_raw_inode(ri);
+
+	if (IS_ERR(fn)) {
+		/* Eeek. Wave bye bye */
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fn);
+	}
+	/* No data here. Only a metadata node, which will be 
+	   obsoleted by the first data write
+	*/
+	f->metadata = fn;
+	up(&f->sem);
+
+	jffs2_complete_reservation(c);
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		/* Eep. */
+		jffs2_clear_inode(inode);
+		return ret;
+	}
+
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd) {
+		/* Argh. Now we treat it like a normal delete */
+		jffs2_complete_reservation(c);
+		jffs2_clear_inode(inode);
+		return -ENOMEM;
+	}
+
+	dir_f = JFFS2_INODE_INFO(dir_i);
+	down(&dir_f->sem);
+
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_i->i_ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = cpu_to_je32(inode->i_ino);
+	rd->mctime = cpu_to_je32(get_seconds());
+	rd->nsize = namelen;
+
+	/* XXX: This is ugly. */
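+	/* The DT_* directory-entry types equal the top four mode bits
+	   (S_IFMT >> 12), which is what this shift relies on. */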
+	rd->type = (mode & S_IFMT) >> 12;
+
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
+	
+	if (IS_ERR(fd)) {
+		/* dirent failed to write. Delete the inode normally 
+		   as if it were the final unlink() */
+		jffs2_complete_reservation(c);
+		jffs2_free_raw_dirent(rd);
+		up(&dir_f->sem);
+		jffs2_clear_inode(inode);
+		return PTR_ERR(fd);
+	}
+
+	dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+
+	jffs2_free_raw_dirent(rd);
+
+	/* Link the fd into the inode's list, obsoleting an old
+	   one if necessary. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	up(&dir_f->sem);
+	jffs2_complete_reservation(c);
+
+	d_instantiate(dentry, inode);
+
+	return 0;
+}
+
+static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
+                        struct inode *new_dir_i, struct dentry *new_dentry)
+{
+	int ret;
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
+	struct jffs2_inode_info *victim_f = NULL;
+	uint8_t type;
+
+	/* The VFS will check for us and prevent trying to rename a 
+	 * file over a directory and vice versa, but if it's a directory,
+	 * the VFS can't check whether the victim is empty. The filesystem
+	 * needs to do that for itself.
+	 */
+	if (new_dentry->d_inode) {
+		victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
+		if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+			struct jffs2_full_dirent *fd;
+
+			down(&victim_f->sem);
+			for (fd = victim_f->dents; fd; fd = fd->next) {
+				if (fd->ino) {
+					up(&victim_f->sem);
+					return -ENOTEMPTY;
+				}
+			}
+			up(&victim_f->sem);
+		}
+	}
+
+	/* XXX: We probably ought to alloc enough space for
+	   both nodes at the same time. Writing the new link, 
+	   then getting -ENOSPC, is quite bad :)
+	*/
+
+	/* Make a hard link */
+	
+	/* XXX: This is ugly */
+	type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
+	if (!type) type = DT_REG;
+
+	ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), 
+			    old_dentry->d_inode->i_ino, type,
+			    new_dentry->d_name.name, new_dentry->d_name.len);
+
+	if (ret)
+		return ret;
+
+	if (victim_f) {
+		/* There was a victim. Kill it off nicely */
+		new_dentry->d_inode->i_nlink--;
+		/* Don't oops if the victim was a dirent pointing to an
+		   inode which didn't exist. */
+		if (victim_f->inocache) {
+			down(&victim_f->sem);
+			victim_f->inocache->nlink--;
+			up(&victim_f->sem);
+		}
+	}
+
+	/* If it was a directory we moved, and there was no victim, 
+	   increase i_nlink on its new parent */
+	if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
+		new_dir_i->i_nlink++;
+
+	/* Unlink the original */
+	ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), 
+		      old_dentry->d_name.name, old_dentry->d_name.len, NULL);
+
+	/* We don't touch inode->i_nlink */
+
+	if (ret) {
+		/* Oh shit. We really ought to make a single node which can do both atomically */
+		struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
+		down(&f->sem);
+		old_dentry->d_inode->i_nlink++;
+		if (f->inocache)
+			f->inocache->nlink++;
+		up(&f->sem);
+
+		printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
+		/* Might as well let the VFS know */
+		d_instantiate(new_dentry, old_dentry->d_inode);
+		atomic_inc(&old_dentry->d_inode->i_count);
+		return ret;
+	}
+
+	if (S_ISDIR(old_dentry->d_inode->i_mode))
+		old_dir_i->i_nlink--;
+
+	return 0;
+}
+
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
new file mode 100644
index 000000000000..41451e8bf361
--- /dev/null
+++ b/fs/jffs2/erase.c
@@ -0,0 +1,442 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: erase.c,v 1.66 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
+#include <linux/crc32.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include "nodelist.h"
+
+struct erase_priv_struct {
+	struct jffs2_eraseblock *jeb;
+	struct jffs2_sb_info *c;
+};
+      
+#ifndef __ECOS
+static void jffs2_erase_callback(struct erase_info *);
+#endif
+static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
+static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+
+static void jffs2_erase_block(struct jffs2_sb_info *c,
+			      struct jffs2_eraseblock *jeb)
+{
+	int ret;
+	uint32_t bad_offset;
+#ifdef __ECOS
+       ret = jffs2_flash_erase(c, jeb);
+       if (!ret) {
+               jffs2_erase_succeeded(c, jeb);
+               return;
+       }
+       bad_offset = jeb->offset;
+#else /* Linux */
+	struct erase_info *instr;
+
+	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
+	if (!instr) {
+		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+		spin_lock(&c->erase_completion_lock);
+		list_del(&jeb->list);
+		list_add(&jeb->list, &c->erase_pending_list);
+		c->erasing_size -= c->sector_size;
+		c->dirty_size += c->sector_size;
+		jeb->dirty_size = c->sector_size;
+		spin_unlock(&c->erase_completion_lock);
+		return;
+	}
+
+	memset(instr, 0, sizeof(*instr));
+
+	instr->mtd = c->mtd;
+	instr->addr = jeb->offset;
+	instr->len = c->sector_size;
+	instr->callback = jffs2_erase_callback;
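+	/* The private data lives in the same kmalloc(), immediately after
+	   the erase_info itself. */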
+	instr->priv = (unsigned long)(&instr[1]);
+	instr->fail_addr = 0xffffffff;
+	
+	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
+	((struct erase_priv_struct *)instr->priv)->c = c;
+
+	ret = c->mtd->erase(c->mtd, instr);
+	if (!ret)
+		return;
+
+	bad_offset = instr->fail_addr;
+	kfree(instr);
+#endif /* __ECOS */
+
+	if (ret == -ENOMEM || ret == -EAGAIN) {
+		/* Erase failed immediately. Refile it on the list */
+		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
+		spin_lock(&c->erase_completion_lock);
+		list_del(&jeb->list);
+		list_add(&jeb->list, &c->erase_pending_list);
+		c->erasing_size -= c->sector_size;
+		c->dirty_size += c->sector_size;
+		jeb->dirty_size = c->sector_size;
+		spin_unlock(&c->erase_completion_lock);
+		return;
+	}
+
+	if (ret == -EROFS) 
+		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
+	else
+		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);
+
+	jffs2_erase_failed(c, jeb, bad_offset);
+}
+
+void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
+{
+	struct jffs2_eraseblock *jeb;
+
+	down(&c->erase_free_sem);
+
+	spin_lock(&c->erase_completion_lock);
+
+	while (!list_empty(&c->erase_complete_list) ||
+	       !list_empty(&c->erase_pending_list)) {
+
+		if (!list_empty(&c->erase_complete_list)) {
+			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
+			list_del(&jeb->list);
+			spin_unlock(&c->erase_completion_lock);
+			jffs2_mark_erased_block(c, jeb);
+
+			if (!--count) {
+				D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
+				goto done;
+			}
+
+		} else if (!list_empty(&c->erase_pending_list)) {
+			jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
+			D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
+			list_del(&jeb->list);
+			c->erasing_size += c->sector_size;
+			c->wasted_size -= jeb->wasted_size;
+			c->free_size -= jeb->free_size;
+			c->used_size -= jeb->used_size;
+			c->dirty_size -= jeb->dirty_size;
+			jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
+			jffs2_free_all_node_refs(c, jeb);
+			list_add(&jeb->list, &c->erasing_list);
+			spin_unlock(&c->erase_completion_lock);
+
+			jffs2_erase_block(c, jeb);
+
+		} else {
+			BUG();
+		}
+
+		/* Be nice */
+		cond_resched();
+		spin_lock(&c->erase_completion_lock);
+	}
+
+	spin_unlock(&c->erase_completion_lock);
+ done:
+	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
+
+	up(&c->erase_free_sem);
+}
+
+static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
+	spin_lock(&c->erase_completion_lock);
+	list_del(&jeb->list);
+	list_add_tail(&jeb->list, &c->erase_complete_list);
+	spin_unlock(&c->erase_completion_lock);
+	/* Ensure that kupdated calls us again to mark them clean */
+	jffs2_erase_pending_trigger(c);
+}
+
+static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
+{
+	/* For NAND, if the failure did not occur at the device level for a
+	   specific physical page, don't bother updating the bad block table. */
+	if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) {
+		/* We had a device-level failure to erase.  Let's see if we've
+		   failed too many times. */
+		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
+			/* We'd like to give this block another try. */
+			spin_lock(&c->erase_completion_lock);
+			list_del(&jeb->list);
+			list_add(&jeb->list, &c->erase_pending_list);
+			c->erasing_size -= c->sector_size;
+			c->dirty_size += c->sector_size;
+			jeb->dirty_size = c->sector_size;
+			spin_unlock(&c->erase_completion_lock);
+			return;
+		}
+	}
+
+	spin_lock(&c->erase_completion_lock);
+	c->erasing_size -= c->sector_size;
+	c->bad_size += c->sector_size;
+	list_del(&jeb->list);
+	list_add(&jeb->list, &c->bad_list);
+	c->nr_erasing_blocks--;
+	spin_unlock(&c->erase_completion_lock);
+	wake_up(&c->erase_wait);
+}	 
+
+#ifndef __ECOS
+static void jffs2_erase_callback(struct erase_info *instr)
+{
+	struct erase_priv_struct *priv = (void *)instr->priv;
+
+	if(instr->state != MTD_ERASE_DONE) {
+		printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state);
+		jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
+	} else {
+		jffs2_erase_succeeded(priv->c, priv->jeb);
+	}	
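+	/* The erase_info (with our priv struct appended) was allocated in
+	   jffs2_erase_block(); free it now that the MTD layer is done with it. */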
+	kfree(instr);
+}
+#endif /* !__ECOS */
+
+/* Hmmm. Maybe we should accept the extra space it takes and make
+   this a standard doubly-linked list? */
+static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
+			struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_inode_cache *ic = NULL;
+	struct jffs2_raw_node_ref **prev;
+
+	prev = &ref->next_in_ino;
+
+	/* Walk the inode's list once, removing any nodes from this eraseblock */
+	while (1) {
+		if (!(*prev)->next_in_ino) {
+			/* We're looking at the jffs2_inode_cache, which is 
+			   at the end of the linked list. Stash it and continue
+			   from the beginning of the list */
+			ic = (struct jffs2_inode_cache *)(*prev);
+			prev = &ic->nodes;
+			continue;
+		} 
+
+		if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
+			/* It's in the block we're erasing */
+			struct jffs2_raw_node_ref *this;
+
+			this = *prev;
+			*prev = this->next_in_ino;
+			this->next_in_ino = NULL;
+
+			if (this == ref)
+				break;
+
+			continue;
+		}
+		/* Not to be deleted. Skip */
+		prev = &((*prev)->next_in_ino);
+	}
+
+	/* PARANOIA */
+	if (!ic) {
+		printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
+		return;
+	}
+
+	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
+		  jeb->offset, jeb->offset + c->sector_size, ic->ino));
+
+	D2({
+		int i=0;
+		struct jffs2_raw_node_ref *this;
+		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);
+
+		this = ic->nodes;
+	   
+		while(this) {
+			printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
+			if (++i == 5) {
+				printk("\n" KERN_DEBUG);
+				i=0;
+			}
+			this = this->next_in_ino;
+		}
+		printk("\n");
+	});
+
+	if (ic->nodes == (void *)ic) {
+		D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
+		jffs2_del_ino_cache(c, ic);
+		jffs2_free_inode_cache(ic);
+	}
+}
+
+static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_raw_node_ref *ref;
+	D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));
+	while(jeb->first_node) {
+		ref = jeb->first_node;
+		jeb->first_node = ref->next_phys;
+		
+		/* Remove from the inode-list */
+		if (ref->next_in_ino)
+			jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
+		/* else it was a non-inode node or already removed, so don't bother */
+
+		jffs2_free_raw_node_ref(ref);
+	}
+	jeb->last_node = NULL;
+}
+
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_raw_node_ref *marker_ref = NULL;
+	unsigned char *ebuf;
+	size_t retlen;
+	int ret;
+	uint32_t bad_offset;
+
+	if (!jffs2_cleanmarker_oob(c)) {
+		marker_ref = jffs2_alloc_raw_node_ref();
+		if (!marker_ref) {
+			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
+			/* Stick it back on the list from whence it came and come back later */
+			jffs2_erase_pending_trigger(c);
+			spin_lock(&c->erase_completion_lock);
+			list_add(&jeb->list, &c->erase_complete_list);
+			spin_unlock(&c->erase_completion_lock);
+			return;
+		}
+	}
+	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!ebuf) {
+		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
+	} else {
+		uint32_t ofs = jeb->offset;
+
+		D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
+		while(ofs < jeb->offset + c->sector_size) {
+			uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
+			int i;
+
+			bad_offset = ofs;
+
+			ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
+			if (ret) {
+				printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+				goto bad;
+			}
+			if (retlen != readlen) {
+				printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+				goto bad;
+			}
+			for (i=0; i<readlen; i += sizeof(unsigned long)) {
+				/* It's OK. We know it's properly aligned */
+				unsigned long datum = *(unsigned long *)(&ebuf[i]);
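+				/* Erased flash reads back as all 0xFF, so datum + 1
+				   wraps to zero only if the word is clean. */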
+				if (datum + 1) {
+					bad_offset += i;
+					printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
+				bad: 
+					if (!jffs2_cleanmarker_oob(c))
+						jffs2_free_raw_node_ref(marker_ref);
+					kfree(ebuf);
+				bad2:
+					spin_lock(&c->erase_completion_lock);
+					/* Stick it on a list (any list) so
+					   erase_failed can take it right off
+					   again.  Silly, but shouldn't happen
+					   often. */
+					list_add(&jeb->list, &c->erasing_list);
+					spin_unlock(&c->erase_completion_lock);
+					jffs2_erase_failed(c, jeb, bad_offset);
+					return;
+				}
+			}
+			ofs += readlen;
+			cond_resched();
+		}
+		kfree(ebuf);
+	}
+
+	bad_offset = jeb->offset;
+
+	/* Write the erase complete marker */	
+	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
+	if (jffs2_cleanmarker_oob(c)) {
+
+		if (jffs2_write_nand_cleanmarker(c, jeb))
+			goto bad2;
+			
+		jeb->first_node = jeb->last_node = NULL;
+
+		jeb->free_size = c->sector_size;
+		jeb->used_size = 0;
+		jeb->dirty_size = 0;
+		jeb->wasted_size = 0;
+	} else {
+		struct kvec vecs[1];
+		struct jffs2_unknown_node marker = {
+			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
+			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+			.totlen =	cpu_to_je32(c->cleanmarker_size)
+		};
+
+		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
+
+		vecs[0].iov_base = (unsigned char *) &marker;
+		vecs[0].iov_len = sizeof(marker);
+		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
+		
+		if (ret) {
+			printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
+			       jeb->offset, ret);
+			goto bad2;
+		}
+		if (retlen != sizeof(marker)) {
+			printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+			       jeb->offset, sizeof(marker), retlen);
+			goto bad2;
+		}
+
+		marker_ref->next_in_ino = NULL;
+		marker_ref->next_phys = NULL;
+		marker_ref->flash_offset = jeb->offset | REF_NORMAL;
+		marker_ref->__totlen = c->cleanmarker_size;
+			
+		jeb->first_node = jeb->last_node = marker_ref;
+			
+		jeb->free_size = c->sector_size - c->cleanmarker_size;
+		jeb->used_size = c->cleanmarker_size;
+		jeb->dirty_size = 0;
+		jeb->wasted_size = 0;
+	}
+
+	spin_lock(&c->erase_completion_lock);
+	c->erasing_size -= c->sector_size;
+	c->free_size += jeb->free_size;
+	c->used_size += jeb->used_size;
+
+	ACCT_SANITY_CHECK(c,jeb);
+	D1(ACCT_PARANOIA_CHECK(jeb));
+
+	list_add_tail(&jeb->list, &c->free_list);
+	c->nr_erasing_blocks--;
+	c->nr_free_blocks++;
+	spin_unlock(&c->erase_completion_lock);
+	wake_up(&c->erase_wait);
+}
+
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
new file mode 100644
index 000000000000..0c607c1388f4
--- /dev/null
+++ b/fs/jffs2/file.c
@@ -0,0 +1,290 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: file.c,v 1.99 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include "nodelist.h"
+
+extern int generic_file_open(struct inode *, struct file *) __attribute__((weak));
+extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) __attribute__((weak));
+
+static int jffs2_commit_write (struct file *filp, struct page *pg,
+			       unsigned start, unsigned end);
+static int jffs2_prepare_write (struct file *filp, struct page *pg,
+				unsigned start, unsigned end);
+static int jffs2_readpage (struct file *filp, struct page *pg);
+
+int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+
+	/* Trigger GC to flush any pending writes for this inode */
+	jffs2_flush_wbuf_gc(c, inode->i_ino);
+			
+	return 0;	
+}
+
+struct file_operations jffs2_file_operations =
+{
+	.llseek =	generic_file_llseek,
+	.open =		generic_file_open,
+	.read =		generic_file_read,
+	.write =	generic_file_write,
+	.ioctl =	jffs2_ioctl,
+	.mmap =		generic_file_readonly_mmap,
+	.fsync =	jffs2_fsync,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,29)
+	.sendfile =	generic_file_sendfile
+#endif
+};
+
+/* jffs2_file_inode_operations */
+
+struct inode_operations jffs2_file_inode_operations =
+{
+	.setattr =	jffs2_setattr
+};
+
+struct address_space_operations jffs2_file_address_operations =
+{
+	.readpage =	jffs2_readpage,
+	.prepare_write =jffs2_prepare_write,
+	.commit_write =	jffs2_commit_write
+};
+
+static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
+{
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	unsigned char *pg_buf;
+	int ret;
+
+	D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));
+
+	if (!PageLocked(pg))
+                PAGE_BUG(pg);
+
+	pg_buf = kmap(pg);
+	/* FIXME: Can kmap fail? */
+
+	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+
+	if (ret) {
+		ClearPageUptodate(pg);
+		SetPageError(pg);
+	} else {
+		SetPageUptodate(pg);
+		ClearPageError(pg);
+	}
+
+	flush_dcache_page(pg);
+	kunmap(pg);
+
+	D2(printk(KERN_DEBUG "readpage finished\n"));
+	return 0;
+}
+
+int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
+{
+	int ret = jffs2_do_readpage_nolock(inode, pg);
+	unlock_page(pg);
+	return ret;
+}
+
+
+static int jffs2_readpage (struct file *filp, struct page *pg)
+{
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
+	int ret;
+	
+	down(&f->sem);
+	ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
+	up(&f->sem);
+	return ret;
+}
+
+static int jffs2_prepare_write (struct file *filp, struct page *pg,
+				unsigned start, unsigned end)
+{
+	struct inode *inode = pg->mapping->host;
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	uint32_t pageofs = pg->index << PAGE_CACHE_SHIFT;
+	int ret = 0;
+
+	D1(printk(KERN_DEBUG "jffs2_prepare_write()\n"));
+
+	if (pageofs > inode->i_size) {
+		/* Make new hole frag from old EOF to new page */
+		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+		struct jffs2_raw_inode ri;
+		struct jffs2_full_dnode *fn;
+		uint32_t phys_ofs, alloc_len;
+		
+		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+			  (unsigned int)inode->i_size, pageofs));
+
+		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
+		if (ret)
+			return ret;
+
+		down(&f->sem);
+		memset(&ri, 0, sizeof(ri));
+
+		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+		ri.totlen = cpu_to_je32(sizeof(ri));
+		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+		ri.ino = cpu_to_je32(f->inocache->ino);
+		ri.version = cpu_to_je32(++f->highest_version);
+		ri.mode = cpu_to_jemode(inode->i_mode);
+		ri.uid = cpu_to_je16(inode->i_uid);
+		ri.gid = cpu_to_je16(inode->i_gid);
+		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
+		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
+		ri.offset = cpu_to_je32(inode->i_size);
+		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
+		ri.csize = cpu_to_je32(0);
+		ri.compr = JFFS2_COMPR_ZERO;
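+		/* JFFS2_COMPR_ZERO expands to zeroes on read with no stored
+		   payload: exactly what a hole node needs. */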
+		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+		ri.data_crc = cpu_to_je32(0);
+		
+		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+
+		if (IS_ERR(fn)) {
+			ret = PTR_ERR(fn);
+			jffs2_complete_reservation(c);
+			up(&f->sem);
+			return ret;
+		}
+		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+		if (f->metadata) {
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+			jffs2_free_full_dnode(f->metadata);
+			f->metadata = NULL;
+		}
+		if (ret) {
+			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
+			jffs2_mark_node_obsolete(c, fn->raw);
+			jffs2_free_full_dnode(fn);
+			jffs2_complete_reservation(c);
+			up(&f->sem);
+			return ret;
+		}
+		jffs2_complete_reservation(c);
+		inode->i_size = pageofs;
+		up(&f->sem);
+	}
+	
+	/* Read in the page if it wasn't already present, unless it's a whole page */
+	if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
+		down(&f->sem);
+		ret = jffs2_do_readpage_nolock(inode, pg);
+		up(&f->sem);
+	}
+	D1(printk(KERN_DEBUG "end prepare_write(). pg->flags %lx\n", pg->flags));
+	return ret;
+}
+
+static int jffs2_commit_write (struct file *filp, struct page *pg,
+			       unsigned start, unsigned end)
+{
+	/* Actually commit the write from the page cache page we're looking at.
+	 * For now, we write the full page out each time. It sucks, but it's simple
+	 */
+	struct inode *inode = pg->mapping->host;
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	struct jffs2_raw_inode *ri;
+	unsigned aligned_start = start & ~3;
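+	/* Write from a 32-bit aligned offset; the padding bytes below 'start'
+	   are subtracted from writtenlen again further down. */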
+	int ret = 0;
+	uint32_t writtenlen = 0;
+
+	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
+		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
+
+	if (!start && end == PAGE_CACHE_SIZE) {
+		/* We need to avoid deadlock with page_cache_read() in
+		   jffs2_garbage_collect_pass(). So we have to mark the
+		   page up to date, to prevent page_cache_read() from 
+		   trying to re-lock it. */
+		SetPageUptodate(pg);
+	}
+
+	ri = jffs2_alloc_raw_inode();
+
+	if (!ri) {
+		D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
+		return -ENOMEM;
+	}
+
+	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
+	ri->ino = cpu_to_je32(inode->i_ino);
+	ri->mode = cpu_to_jemode(inode->i_mode);
+	ri->uid = cpu_to_je16(inode->i_uid);
+	ri->gid = cpu_to_je16(inode->i_gid);
+	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
+	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());
+
+	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
+	   hurt to do it again. The alternative is ifdefs, which are ugly. */
+	kmap(pg);
+
+	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
+				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
+				      end - aligned_start, &writtenlen);
+
+	kunmap(pg);
+
+	if (ret) {
+		/* There was an error writing. */
+		SetPageError(pg);
+	}
+	
+	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
+	if (writtenlen < (start&3))
+		writtenlen = 0;
+	else
+		writtenlen -= (start&3);
+
+	if (writtenlen) {
+		if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
+			inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
+			inode->i_blocks = (inode->i_size + 511) >> 9;
+			
+			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
+		}
+	}
+
+	jffs2_free_raw_inode(ri);
+
+	if (start+writtenlen < end) {
+		/* generic_file_write has written more to the page cache than we've
+		   actually written to the medium. Mark the page !Uptodate so that 
+		   it gets reread */
+		D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
+		SetPageError(pg);
+		ClearPageUptodate(pg);
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",writtenlen?writtenlen:ret));
+	return writtenlen?writtenlen:ret;
+}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
new file mode 100644
index 000000000000..30ab233fe423
--- /dev/null
+++ b/fs/jffs2/fs.c
@@ -0,0 +1,677 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: fs.c,v 1.51 2004/11/28 12:19:37 dedekind Exp $
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/vfs.h>
+#include <linux/crc32.h>
+#include "nodelist.h"
+
+static int jffs2_flash_setup(struct jffs2_sb_info *c);
+
+static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
+{
+	struct jffs2_full_dnode *old_metadata, *new_metadata;
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	struct jffs2_raw_inode *ri;
+	unsigned short dev;
+	unsigned char *mdata = NULL;
+	int mdatalen = 0;
+	unsigned int ivalid;
+	uint32_t phys_ofs, alloclen;
+	int ret;
+	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
+	ret = inode_change_ok(inode, iattr);
+	if (ret) 
+		return ret;
+
+	/* Special cases - we don't want more than one data node
+	   for these types on the medium at any time. So setattr
+	   must read the original data associated with the node
+	   (i.e. the device numbers or the target name) and write
+	   it out again with the appropriate data attached */
+	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+		/* For these, we don't actually need to read the old node */
+		dev = old_encode_dev(inode->i_rdev);
+		mdata = (char *)&dev;
+		mdatalen = sizeof(dev);
+		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
+	} else if (S_ISLNK(inode->i_mode)) {
+		mdatalen = f->metadata->size;
+		mdata = kmalloc(f->metadata->size, GFP_USER);
+		if (!mdata)
+			return -ENOMEM;
+		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
+		if (ret) {
+			kfree(mdata);
+			return ret;
+		}
+		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
+	}
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri) {
+		if (S_ISLNK(inode->i_mode))
+			kfree(mdata);
+		return -ENOMEM;
+	}
+		
+	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		if (S_ISLNK(inode->i_mode & S_IFMT))
+			 kfree(mdata);
+		return ret;
+	}
+	down(&f->sem);
+	ivalid = iattr->ia_valid;
+	
+	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
+	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+	ri->ino = cpu_to_je32(inode->i_ino);
+	ri->version = cpu_to_je32(++f->highest_version);
+
+	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
+	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
+
+	if (ivalid & ATTR_MODE) {
+		if (iattr->ia_mode & S_ISGID &&
+		    !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
+			ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
+		else
+			ri->mode = cpu_to_jemode(iattr->ia_mode);
+	} else {
+		ri->mode = cpu_to_jemode(inode->i_mode);
+	}
+
+
+	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
+	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
+	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
+	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
+
+	ri->offset = cpu_to_je32(0);
+	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
+	ri->compr = JFFS2_COMPR_NONE;
+	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
+		/* It's an extension. Make it a hole node */
+		ri->compr = JFFS2_COMPR_ZERO;
+		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
+		ri->offset = cpu_to_je32(inode->i_size);
+	}
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+	if (mdatalen)
+		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
+	else
+		ri->data_crc = cpu_to_je32(0);
+
+	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL);
+	if (S_ISLNK(inode->i_mode))
+		kfree(mdata);
+	
+	if (IS_ERR(new_metadata)) {
+		jffs2_complete_reservation(c);
+		jffs2_free_raw_inode(ri);
+		up(&f->sem);
+		return PTR_ERR(new_metadata);
+	}
+	/* It worked. Update the inode */
+	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
+	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
+	inode->i_mode = jemode_to_cpu(ri->mode);
+	inode->i_uid = je16_to_cpu(ri->uid);
+	inode->i_gid = je16_to_cpu(ri->gid);
+
+
+	old_metadata = f->metadata;
+
+	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
+		jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size);
+
+	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
+		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
+		inode->i_size = iattr->ia_size;
+		f->metadata = NULL;
+	} else {
+		f->metadata = new_metadata;
+	}
+	if (old_metadata) {
+		jffs2_mark_node_obsolete(c, old_metadata->raw);
+		jffs2_free_full_dnode(old_metadata);
+	}
+	jffs2_free_raw_inode(ri);
+
+	up(&f->sem);
+	jffs2_complete_reservation(c);
+
+	/* We have to do the vmtruncate() without f->sem held, since
+	   some pages may be locked and waiting for it in readpage(). 
+	   We are protected from a simultaneous write() extending i_size
+	   back past iattr->ia_size, because do_truncate() holds the
+	   generic inode semaphore. */
+	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
+		vmtruncate(inode, iattr->ia_size);
+
+	return 0;
+}
+
+int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	return jffs2_do_setattr(dentry->d_inode, iattr);
+}
+
+int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+	unsigned long avail;
+
+	buf->f_type = JFFS2_SUPER_MAGIC;
+	buf->f_bsize = 1 << PAGE_SHIFT;
+	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
+	buf->f_files = 0;
+	buf->f_ffree = 0;
+	buf->f_namelen = JFFS2_MAX_NAME_LEN;
+
+	spin_lock(&c->erase_completion_lock);
+
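+	/* Usable space is dirty + free, minus the blocks kept in reserve so
+	   that garbage collection can still make progress. */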
+	avail = c->dirty_size + c->free_size;
+	if (avail > c->sector_size * c->resv_blocks_write)
+		avail -= c->sector_size * c->resv_blocks_write;
+	else
+		avail = 0;
+
+	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
+
+	D2(jffs2_dump_block_lists(c));
+
+	spin_unlock(&c->erase_completion_lock);
+
+	return 0;
+}
+
+
+void jffs2_clear_inode (struct inode *inode)
+{
+	/* We can forget about this inode for now - drop all 
+	 *  the nodelists associated with it, etc.
+	 */
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	
+	D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
+
+	jffs2_do_clear_inode(c, f);
+}
+
+void jffs2_read_inode (struct inode *inode)
+{
+	struct jffs2_inode_info *f;
+	struct jffs2_sb_info *c;
+	struct jffs2_raw_inode latest_node;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino));
+
+	f = JFFS2_INODE_INFO(inode);
+	c = JFFS2_SB_INFO(inode->i_sb);
+
+	jffs2_init_inode_info(f);
+	
+	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
+
+	if (ret) {
+		make_bad_inode(inode);
+		up(&f->sem);
+		return;
+	}
+	inode->i_mode = jemode_to_cpu(latest_node.mode);
+	inode->i_uid = je16_to_cpu(latest_node.uid);
+	inode->i_gid = je16_to_cpu(latest_node.gid);
+	inode->i_size = je32_to_cpu(latest_node.isize);
+	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
+	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
+	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
+
+	inode->i_nlink = f->inocache->nlink;
+
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks = (inode->i_size + 511) >> 9;
+	
+	switch (inode->i_mode & S_IFMT) {
+		jint16_t rdev;
+
+	case S_IFLNK:
+		inode->i_op = &jffs2_symlink_inode_operations;
+		break;
+		
+	case S_IFDIR:
+	{
+		struct jffs2_full_dirent *fd;
+
+		for (fd=f->dents; fd; fd = fd->next) {
+			if (fd->type == DT_DIR && fd->ino)
+				inode->i_nlink++;
+		}
+		/* and '..' */
+		inode->i_nlink++;
+		/* Root dir gets i_nlink 3 for some reason */
+		if (inode->i_ino == 1)
+			inode->i_nlink++;
+
+		inode->i_op = &jffs2_dir_inode_operations;
+		inode->i_fop = &jffs2_dir_operations;
+		break;
+	}
+	case S_IFREG:
+		inode->i_op = &jffs2_file_inode_operations;
+		inode->i_fop = &jffs2_file_operations;
+		inode->i_mapping->a_ops = &jffs2_file_address_operations;
+		inode->i_mapping->nrpages = 0;
+		break;
+
+	case S_IFBLK:
+	case S_IFCHR:
+		/* Read the device numbers from the media */
+		D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
+		if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) {
+			/* Eep */
+			printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
+			up(&f->sem);
+			jffs2_do_clear_inode(c, f);
+			make_bad_inode(inode);
+			return;
+		}			
+
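+		/* fall through: sockets and FIFOs share the init_special_inode()
+		   call below but carry no device number, so rdev is ignored for them */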
+	case S_IFSOCK:
+	case S_IFIFO:
+		inode->i_op = &jffs2_file_inode_operations;
+		init_special_inode(inode, inode->i_mode,
+				   old_decode_dev((je16_to_cpu(rdev))));
+		break;
+
+	default:
+		printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
+	}
+
+	up(&f->sem);
+
+	D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
+}
+
+void jffs2_dirty_inode(struct inode *inode)
+{
+	struct iattr iattr;
+
+	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
+		D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
+		return;
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino));
+
+	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
+	iattr.ia_mode = inode->i_mode;
+	iattr.ia_uid = inode->i_uid;
+	iattr.ia_gid = inode->i_gid;
+	iattr.ia_atime = inode->i_atime;
+	iattr.ia_mtime = inode->i_mtime;
+	iattr.ia_ctime = inode->i_ctime;
+
+	jffs2_do_setattr(inode, &iattr);
+}
+
+int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
+		return -EROFS;
+
+	/* We stop if it was running, then restart if it needs to.
+	   This also catches the case where it was stopped and this
+	   is just a remount to restart it.
+	   Flush the writebuffer, if necessary, else we lose it */
+	if (!(sb->s_flags & MS_RDONLY)) {
+		jffs2_stop_garbage_collect_thread(c);
+		down(&c->alloc_sem);
+		jffs2_flush_wbuf_pad(c);
+		up(&c->alloc_sem);
+	}	
+
+	if (!(*flags & MS_RDONLY))
+		jffs2_start_garbage_collect_thread(c);
+	
+	*flags |= MS_NOATIME;
+
+	return 0;
+}
+
+void jffs2_write_super (struct super_block *sb)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+	sb->s_dirt = 0;
+
+	if (sb->s_flags & MS_RDONLY)
+		return;
+
+	D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
+	jffs2_garbage_collect_trigger(c);
+	jffs2_erase_pending_blocks(c, 0);
+	jffs2_flush_wbuf_gc(c, 0);
+}
+
+
+/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
+   fill in the raw_inode while you're at it. */
+struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
+{
+	struct inode *inode;
+	struct super_block *sb = dir_i->i_sb;
+	struct jffs2_sb_info *c;
+	struct jffs2_inode_info *f;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
+
+	c = JFFS2_SB_INFO(sb);
+	
+	inode = new_inode(sb);
+	
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	f = JFFS2_INODE_INFO(inode);
+	jffs2_init_inode_info(f);
+
+	memset(ri, 0, sizeof(*ri));
+	/* Set OS-specific defaults for new inodes */
+	ri->uid = cpu_to_je16(current->fsuid);
+
+	if (dir_i->i_mode & S_ISGID) {
+		ri->gid = cpu_to_je16(dir_i->i_gid);
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else {
+		ri->gid = cpu_to_je16(current->fsgid);
+	}
+	ri->mode =  cpu_to_jemode(mode);
+	ret = jffs2_do_new_inode (c, f, mode, ri);
+	if (ret) {
+		make_bad_inode(inode);
+		iput(inode);
+		return ERR_PTR(ret);
+	}
+	inode->i_nlink = 1;
+	inode->i_ino = je32_to_cpu(ri->ino);
+	inode->i_mode = jemode_to_cpu(ri->mode);
+	inode->i_gid = je16_to_cpu(ri->gid);
+	inode->i_uid = je16_to_cpu(ri->uid);
+	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
+
+	inode->i_blksize = PAGE_SIZE;
+	inode->i_blocks = 0;
+	inode->i_size = 0;
+
+	insert_inode_hash(inode);
+
+	return inode;
+}
+
+
+int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct jffs2_sb_info *c;
+	struct inode *root_i;
+	int ret;
+	size_t blocks;
+
+	c = JFFS2_SB_INFO(sb);
+
+#ifndef CONFIG_JFFS2_FS_NAND
+	if (c->mtd->type == MTD_NANDFLASH) {
+		printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
+		return -EINVAL;
+	}
+#endif
+
+	c->flash_size = c->mtd->size;
+
+	/* 
+	 * Check whether we have to concatenate physical blocks into larger virtual blocks
+	 * to reduce the memory footprint of c->blocks. (kmalloc allows max. 128K allocation)
+	 */
+	c->sector_size = c->mtd->erasesize; 
+	blocks = c->flash_size / c->sector_size;
+	if (!(c->mtd->flags & MTD_NO_VIRTBLOCKS)) {
+		while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) {
+			blocks >>= 1;
+			c->sector_size <<= 1;
+		}	
+	}
+
+	/*
+	 * Size alignment check
+	 */
+	if ((c->sector_size * blocks) != c->flash_size) {
+		c->flash_size = c->sector_size * blocks;		
+		printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
+			c->flash_size / 1024);
+	}
+
+	if (c->sector_size != c->mtd->erasesize)
+		printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual blocks size (%dKiB) instead\n", 
+			c->mtd->erasesize / 1024, c->sector_size / 1024);
+
+	if (c->flash_size < 5*c->sector_size) {
+		printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
+		return -EINVAL;
+	}
+
+	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
+	/* Joern -- stick alignment for weird 8-byte-page flash here */
+
+	/* NAND (or other bizarre) flash... do setup accordingly */
+	ret = jffs2_flash_setup(c);
+	if (ret)
+		return ret;
+
+	c->inocache_list = kmalloc(INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
+	if (!c->inocache_list) {
+		ret = -ENOMEM;
+		goto out_wbuf;
+	}
+	memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *));
+
+	if ((ret = jffs2_do_mount_fs(c)))
+		goto out_inohash;
+
+	ret = -EINVAL;
+
+	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
+	root_i = iget(sb, 1);
+	if (is_bad_inode(root_i)) {
+		D1(printk(KERN_WARNING "get root inode failed\n"));
+		goto out_nodes;
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
+	sb->s_root = d_alloc_root(root_i);
+	if (!sb->s_root)
+		goto out_root_i;
+
+#if LINUX_VERSION_CODE >= 0x20403
+	sb->s_maxbytes = 0xFFFFFFFF;
+#endif
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = JFFS2_SUPER_MAGIC;
+	if (!(sb->s_flags & MS_RDONLY))
+		jffs2_start_garbage_collect_thread(c);
+	return 0;
+
+ out_root_i:
+	iput(root_i);
+ out_nodes:
+	jffs2_free_ino_caches(c);
+	jffs2_free_raw_node_refs(c);
+	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+		vfree(c->blocks);
+	else
+		kfree(c->blocks);
+ out_inohash:
+	kfree(c->inocache_list);
+ out_wbuf:
+	jffs2_flash_cleanup(c);
+
+	return ret;
+}
+
+void jffs2_gc_release_inode(struct jffs2_sb_info *c,
+				   struct jffs2_inode_info *f)
+{
+	iput(OFNI_EDONI_2SFFJ(f));
+}
+
+struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
+						     int inum, int nlink)
+{
+	struct inode *inode;
+	struct jffs2_inode_cache *ic;
+	if (!nlink) {
+		/* The inode has zero nlink but its nodes weren't yet marked
+		   obsolete. This has to be because we're still waiting for 
+		   the final (close() and) iput() to happen.
+
+		   There's a possibility that the final iput() could have 
+		   happened while we were contemplating. In order to ensure
+		   that we don't cause a new read_inode() (which would fail)
+		   for the inode in question, we use ilookup() in this case
+		   instead of iget().
+
+		   The nlink can't _become_ zero at this point because we're 
+		   holding the alloc_sem, and jffs2_do_unlink() would also
+		   need that while decrementing nlink on any inode.
+		*/
+		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
+		if (!inode) {
+			D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
+				  inum));
+
+			spin_lock(&c->inocache_lock);
+			ic = jffs2_get_ino_cache(c, inum);
+			if (!ic) {
+				D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
+				spin_unlock(&c->inocache_lock);
+				return NULL;
+			}
+			if (ic->state != INO_STATE_CHECKEDABSENT) {
+				/* Wait for progress. Don't just loop */
+				D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
+					  ic->ino, ic->state));
+				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+			} else {
+				spin_unlock(&c->inocache_lock);
+			}
+
+			return NULL;
+		}
+	} else {
+		/* Inode has links to it still; they're not going away because
+		   jffs2_do_unlink() would need the alloc_sem and we have it.
+		   Just iget() it, and if read_inode() is necessary that's OK.
+		*/
+		inode = iget(OFNI_BS_2SFFJ(c), inum);
+		if (!inode)
+			return ERR_PTR(-ENOMEM);
+	}
+	if (is_bad_inode(inode)) {
+		printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
+		       inum, nlink);
+		/* NB. This will happen again. We need to do something appropriate here. */
+		iput(inode);
+		return ERR_PTR(-EIO);
+	}
+
+	return JFFS2_INODE_INFO(inode);
+}
+
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, 
+				   struct jffs2_inode_info *f, 
+				   unsigned long offset,
+				   unsigned long *priv)
+{
+	struct inode *inode = OFNI_EDONI_2SFFJ(f);
+	struct page *pg;
+
+	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, 
+			     (void *)jffs2_do_readpage_unlock, inode);
+	if (IS_ERR(pg))
+		return (void *)pg;
+	
+	*priv = (unsigned long)pg;
+	return kmap(pg);
+}
+
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
+			   unsigned char *ptr,
+			   unsigned long *priv)
+{
+	struct page *pg = (void *)*priv;
+
+	kunmap(pg);
+	page_cache_release(pg);
+}
+
+static int jffs2_flash_setup(struct jffs2_sb_info *c) {
+	int ret = 0;
+	
+	if (jffs2_cleanmarker_oob(c)) {
+		/* NAND flash... do setup accordingly */
+		ret = jffs2_nand_flash_setup(c);
+		if (ret)
+			return ret;
+	}
+
+	/* add setups for other bizarre flashes here... */
+	if (jffs2_nor_ecc(c)) {
+		ret = jffs2_nor_ecc_flash_setup(c);
+		if (ret)
+			return ret;
+	}
+	return ret;
+}
+
+void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
+
+	if (jffs2_cleanmarker_oob(c)) {
+		jffs2_nand_flash_cleanup(c);
+	}
+
+	/* add cleanups for other bizarre flashes here... */
+	if (jffs2_nor_ecc(c)) {
+		jffs2_nor_ecc_flash_cleanup(c);
+	}
+}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
new file mode 100644
index 000000000000..87ec74ff5930
--- /dev/null
+++ b/fs/jffs2/gc.c
@@ -0,0 +1,1246 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: gc.c,v 1.144 2004/12/21 11:18:50 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/compiler.h>
+#include <linux/stat.h>
+#include "nodelist.h"
+#include "compr.h"
+
+static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, 
+					  struct jffs2_inode_cache *ic,
+					  struct jffs2_raw_node_ref *raw);
+static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
+static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
+static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
+static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+				      uint32_t start, uint32_t end);
+static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+				       uint32_t start, uint32_t end);
+static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
+			       struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);
+
+/* Called with erase_completion_lock held */
+static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
+{
+	struct jffs2_eraseblock *ret;
+	struct list_head *nextlist = NULL;
+	int n = jiffies % 128;
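+	/* n is a cheap pseudo-random number in [0,127]; the comparisons below
+	   use it to weight which list gets picked. */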
+
+	/* Pick an eraseblock to garbage collect next. This is where we'll
+	   put the clever wear-levelling algorithms. Eventually.  */
+	/* We possibly want to favour the dirtier blocks more when the
+	   number of free blocks is low. */
+	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
+		D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
+		nextlist = &c->bad_used_list;
+	} else if (n < 50 && !list_empty(&c->erasable_list)) {
+		/* Note that most of them will have gone directly to be erased. 
+		   So don't favour the erasable_list _too_ much. */
+		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
+		nextlist = &c->erasable_list;
+	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
+		/* Most of the time, pick one off the very_dirty list */
+		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
+		nextlist = &c->very_dirty_list;
+	} else if (n < 126 && !list_empty(&c->dirty_list)) {
+		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
+		nextlist = &c->dirty_list;
+	} else if (!list_empty(&c->clean_list)) {
+		D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
+		nextlist = &c->clean_list;
+	} else if (!list_empty(&c->dirty_list)) {
+		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));
+
+		nextlist = &c->dirty_list;
+	} else if (!list_empty(&c->very_dirty_list)) {
+		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
+		nextlist = &c->very_dirty_list;
+	} else if (!list_empty(&c->erasable_list)) {
+		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));
+
+		nextlist = &c->erasable_list;
+	} else {
+		/* Eep. All were empty */
+		D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
+		return NULL;
+	}
+
+	ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
+	list_del(&ret->list);
+	c->gcblock = ret;
+	ret->gc_node = ret->first_node;
+	if (!ret->gc_node) {
+		printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
+		BUG();
+	}
+	
+	/* Have we accidentally picked a clean block with wasted space ? */
+	if (ret->wasted_size) {
+		D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
+		ret->dirty_size += ret->wasted_size;
+		c->wasted_size -= ret->wasted_size;
+		c->dirty_size += ret->wasted_size;
+		ret->wasted_size = 0;
+	}
+
+	D2(jffs2_dump_block_lists(c));
+	return ret;
+}
+
+/* jffs2_garbage_collect_pass
+ * Make a single attempt to progress GC. Move one node, and possibly
+ * start erasing one eraseblock.
+ */
+int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
+{
+	struct jffs2_inode_info *f;
+	struct jffs2_inode_cache *ic;
+	struct jffs2_eraseblock *jeb;
+	struct jffs2_raw_node_ref *raw;
+	int ret = 0, inum, nlink;
+
+	if (down_interruptible(&c->alloc_sem))
+		return -EINTR;
+
+	for (;;) {
+		spin_lock(&c->erase_completion_lock);
+		if (!c->unchecked_size)
+			break;
+
+		/* We can't start doing GC yet. We haven't finished checking
+		   the node CRCs etc. Do it now. */
+		
+		/* checked_ino is protected by the alloc_sem */
+		if (c->checked_ino > c->highest_ino) {
+			printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
+			       c->unchecked_size);
+			D2(jffs2_dump_block_lists(c));
+			spin_unlock(&c->erase_completion_lock);
+			BUG();
+		}
+
+		spin_unlock(&c->erase_completion_lock);
+
+		spin_lock(&c->inocache_lock);
+
+		ic = jffs2_get_ino_cache(c, c->checked_ino++);
+
+		if (!ic) {
+			spin_unlock(&c->inocache_lock);
+			continue;
+		}
+
+		if (!ic->nlink) {
+			D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
+				  ic->ino));
+			spin_unlock(&c->inocache_lock);
+			continue;
+		}
+		switch(ic->state) {
+		case INO_STATE_CHECKEDABSENT:
+		case INO_STATE_PRESENT:
+			D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
+			spin_unlock(&c->inocache_lock);
+			continue;
+
+		case INO_STATE_GC:
+		case INO_STATE_CHECKING:
+			printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
+			spin_unlock(&c->inocache_lock);
+			BUG();
+
+		case INO_STATE_READING:
+			/* We need to wait for it to finish, lest we move on
+			   and trigger the BUG() above while we haven't yet 
+			   finished checking all its nodes */
+			D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
+			up(&c->alloc_sem);
+			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+			return 0;
+
+		default:
+			BUG();
+
+		case INO_STATE_UNCHECKED:
+			;
+		}
+		ic->state = INO_STATE_CHECKING;
+		spin_unlock(&c->inocache_lock);
+
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));
+
+		ret = jffs2_do_crccheck_inode(c, ic);
+		if (ret)
+			printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);
+
+		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
+		up(&c->alloc_sem);
+		return ret;
+	}
+
+	/* First, work out which block we're garbage-collecting */
+	jeb = c->gcblock;
+
+	if (!jeb)
+		jeb = jffs2_find_gc_block(c);
+
+	if (!jeb) {
+		D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+		spin_unlock(&c->erase_completion_lock);
+		up(&c->alloc_sem);
+		return -EIO;
+	}
+
+	D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
+	D1(if (c->nextblock)
+	   printk(KERN_DEBUG "Nextblock at  %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
+
+	if (!jeb->used_size) {
+		up(&c->alloc_sem);
+		goto eraseit;
+	}
+
+	raw = jeb->gc_node;
+			
+	while(ref_obsolete(raw)) {
+		D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
+		raw = raw->next_phys;
+		if (unlikely(!raw)) {
+			printk(KERN_WARNING "eep. End of raw list while there are supposedly still nodes to GC\n");
+			printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", 
+			       jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
+			jeb->gc_node = raw;
+			spin_unlock(&c->erase_completion_lock);
+			up(&c->alloc_sem);
+			BUG();
+		}
+	}
+	jeb->gc_node = raw;
+
+	D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));
+
+	if (!raw->next_in_ino) {
+		/* Inode-less node. Clean marker, snapshot or something like that */
+		/* FIXME: If it's something that needs to be copied, including something
+		   we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */
+		spin_unlock(&c->erase_completion_lock);
+		jffs2_mark_node_obsolete(c, raw);
+		up(&c->alloc_sem);
+		goto eraseit_lock;
+	}
+
+	ic = jffs2_raw_ref_to_ic(raw);
+
+	/* We need to hold the inocache. Either the erase_completion_lock or
+	   the inocache_lock is sufficient; we trade down since the inocache_lock
+	   causes less contention. */
+	spin_lock(&c->inocache_lock);
+
+	spin_unlock(&c->erase_completion_lock);
+
+	D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));
+
+	/* Three possibilities:
+	   1. Inode is already in-core. We must iget it and do proper
+	      updating to its fragtree, etc.
+	   2. Inode is not in-core, node is REF_PRISTINE. We lock the
+	      inocache to prevent a read_inode(), copy the node intact.
+	   3. Inode is not in-core, node is not pristine. We must iget()
+	      and take the slow path.
+	*/
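+	/* (In terms of ic->state below: INO_STATE_PRESENT is case 1;
+	   INO_STATE_CHECKEDABSENT with a REF_PRISTINE node is case 2;
+	   INO_STATE_CHECKEDABSENT otherwise is case 3.) */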
+
+	switch(ic->state) {
+	case INO_STATE_CHECKEDABSENT:
+		/* It's been checked, but it's not currently in-core. 
+		   We can just copy any pristine nodes, but have
+		   to prevent anyone else from doing read_inode() while
+		   we're at it, so we set the state accordingly */
+		if (ref_flags(raw) == REF_PRISTINE)
+			ic->state = INO_STATE_GC;
+		else {
+			D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", 
+				  ic->ino));
+		}
+		break;
+
+	case INO_STATE_PRESENT:
+		/* It's in-core. GC must iget() it. */
+		break;
+
+	case INO_STATE_UNCHECKED:
+	case INO_STATE_CHECKING:
+	case INO_STATE_GC:
+		/* Should never happen. We should have finished checking
+		   by the time we actually start doing any GC, and since 
+		   we're holding the alloc_sem, no other garbage collection 
+		   can happen.
+		*/
+		printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
+		       ic->ino, ic->state);
+		up(&c->alloc_sem);
+		spin_unlock(&c->inocache_lock);
+		BUG();
+
+	case INO_STATE_READING:
+		/* Someone's currently trying to read it. We must wait for
+		   them to finish and then go through the full iget() route
+		   to do the GC. However, sometimes read_inode() needs to get
+		   the alloc_sem() (for marking nodes invalid) so we must
+		   drop the alloc_sem before sleeping. */
+
+		up(&c->alloc_sem);
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
+			  ic->ino, ic->state));
+		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+		/* And because we dropped the alloc_sem we must start again from the 
+		   beginning. Ponder chance of livelock here -- we're returning success
+		   without actually making any progress.
+
+		   Q: What are the chances that the inode is back in INO_STATE_READING 
+		   again by the time we next enter this function? And that this happens
+		   enough times to cause a real delay?
+
+		   A: Small enough that I don't care :) 
+		*/
+		return 0;
+	}
+
+	/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
+	   node intact, and we don't have to muck about with the fragtree etc. 
+	   because we know it's not in-core. If it _was_ in-core, we go through
+	   all the iget() crap anyway */
+
+	if (ic->state == INO_STATE_GC) {
+		spin_unlock(&c->inocache_lock);
+
+		ret = jffs2_garbage_collect_pristine(c, ic, raw);
+
+		spin_lock(&c->inocache_lock);
+		ic->state = INO_STATE_CHECKEDABSENT;
+		wake_up(&c->inocache_wq);
+
+		if (ret != -EBADFD) {
+			spin_unlock(&c->inocache_lock);
+			goto release_sem;
+		}
+
+		/* Fall through if it wanted us to, with inocache_lock held */
+	}
+
+	/* Prevent the fairly unlikely race where the gcblock is
+	   entirely obsoleted by the final close of a file which had
+	   the only valid nodes in the block, followed by erasure,
+	   followed by freeing of the ic because the erased block(s)
+	   held _all_ the nodes of that inode.... never been seen but
+	   it's vaguely possible. */
+
+	inum = ic->ino;
+	nlink = ic->nlink;
+	spin_unlock(&c->inocache_lock);
+
+	f = jffs2_gc_fetch_inode(c, inum, nlink);
+	if (IS_ERR(f)) {
+		ret = PTR_ERR(f);
+		goto release_sem;
+	}
+	if (!f) {
+		ret = 0;
+		goto release_sem;
+	}
+
+	ret = jffs2_garbage_collect_live(c, jeb, raw, f);
+
+	jffs2_gc_release_inode(c, f);
+
+ release_sem:
+	up(&c->alloc_sem);
+
+ eraseit_lock:
+	/* If we've finished this block, start it erasing */
+	spin_lock(&c->erase_completion_lock);
+
+ eraseit:
+	if (c->gcblock && !c->gcblock->used_size) {
+		D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
+		/* We're GC'ing an empty block? */
+		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
+		c->gcblock = NULL;
+		c->nr_erasing_blocks++;
+		jffs2_erase_pending_trigger(c);
+	}
+	spin_unlock(&c->erase_completion_lock);
+
+	return ret;
+}
+
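+/* GC a single node belonging to an inode we hold in-core. With f->sem held,
+   re-check that the gcblock and the raw node are still current, then hand
+   the node to the appropriate helper: metadata, pristine copy, hole, data
+   node, dirent or deletion dirent. */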
+static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
+				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
+{
+	struct jffs2_node_frag *frag;
+	struct jffs2_full_dnode *fn = NULL;
+	struct jffs2_full_dirent *fd;
+	uint32_t start = 0, end = 0, nrfrags = 0;
+	int ret = 0;
+
+	down(&f->sem);
+
+	/* Now we have the lock for this inode. Check that it's still the one at the head
+	   of the list. */
+
+	spin_lock(&c->erase_completion_lock);
+
+	if (c->gcblock != jeb) {
+		spin_unlock(&c->erase_completion_lock);
+		D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
+		goto upnout;
+	}
+	if (ref_obsolete(raw)) {
+		spin_unlock(&c->erase_completion_lock);
+		D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
+		/* They'll call again */
+		goto upnout;
+	}
+	spin_unlock(&c->erase_completion_lock);
+
+	/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
+	if (f->metadata && f->metadata->raw == raw) {
+		fn = f->metadata;
+		ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
+		goto upnout;
+	}
+
+	/* FIXME. Read node and do lookup? */
+	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
+		if (frag->node && frag->node->raw == raw) {
+			fn = frag->node;
+			end = frag->ofs + frag->size;
+			if (!nrfrags++)
+				start = frag->ofs;
+			if (nrfrags == frag->node->frags)
+				break; /* We've found them all */
+		}
+	}
+	if (fn) {
+		if (ref_flags(raw) == REF_PRISTINE) {
+			ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
+			if (!ret) {
+				/* Urgh. Return it sensibly. */
+				frag->node->raw = f->inocache->nodes;
+			}	
+			if (ret != -EBADFD)
+				goto upnout;
+		}
+		/* We found a datanode. Do the GC */
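+		/* The page test below: with 4KiB pages, for example, start 0x0ff0
+		   and end 0x1010 land in pages 0 and 1, so the range crosses a page
+		   boundary and can only have come from a hole node. */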
+		if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
+			/* It crosses a page boundary. Therefore, it must be a hole. */
+			ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
+		} else {
+			/* It could still be a hole. But we GC the page this way anyway */
+			ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
+		}
+		goto upnout;
+	}
+	
+	/* Wasn't a dnode. Try dirent */
+	for (fd = f->dents; fd; fd=fd->next) {
+		if (fd->raw == raw)
+			break;
+	}
+
+	if (fd && fd->ino) {
+		ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
+	} else if (fd) {
+		ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
+	} else {
+		printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n",
+		       ref_offset(raw), f->inocache->ino);
+		if (ref_obsolete(raw)) {
+			printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
+		} else {
+			ret = -EIO;
+		}
+	}
+ upnout:
+	up(&f->sem);
+
+	return ret;
+}
+
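+/* Copy a REF_PRISTINE node to freshly reserved space verbatim, after
+   re-verifying its header, node and data/name CRCs. Returns -EBADFD when
+   the node doesn't fit untouched or fails its CRCs, so the caller can fall
+   back to the normal split-and-rewrite path. */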
+static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, 
+					  struct jffs2_inode_cache *ic,
+					  struct jffs2_raw_node_ref *raw)
+{
+	union jffs2_node_union *node;
+	struct jffs2_raw_node_ref *nraw;
+	size_t retlen;
+	int ret;
+	uint32_t phys_ofs, alloclen;
+	uint32_t crc, rawlen;
+	int retried = 0;
+
+	D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
+
+	rawlen = ref_totlen(c, c->gcblock, raw);
+
+	/* Ask for a small amount of space (or the totlen if smaller) because we
+	   don't want to force wastage of the end of a block if splitting would
+	   work. */
+	ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN, 
+					      rawlen), &phys_ofs, &alloclen);
+	if (ret)
+		return ret;
+
+	if (alloclen < rawlen) {
+		/* Doesn't fit untouched. We'll go the old route and split it */
+		return -EBADFD;
+	}
+
+	node = kmalloc(rawlen, GFP_KERNEL);
+	if (!node)
+               return -ENOMEM;
+
+	ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
+	if (!ret && retlen != rawlen)
+		ret = -EIO;
+	if (ret)
+		goto out_node;
+
+	crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
+	if (je32_to_cpu(node->u.hdr_crc) != crc) {
+		printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+		       ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
+		goto bail;
+	}
+
+	switch(je16_to_cpu(node->u.nodetype)) {
+	case JFFS2_NODETYPE_INODE:
+		crc = crc32(0, node, sizeof(node->i)-8);
+		if (je32_to_cpu(node->i.node_crc) != crc) {
+			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			       ref_offset(raw), je32_to_cpu(node->i.node_crc), crc);
+			goto bail;
+		}
+
+		if (je32_to_cpu(node->i.dsize)) {
+			crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
+			if (je32_to_cpu(node->i.data_crc) != crc) {
+				printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+				       ref_offset(raw), je32_to_cpu(node->i.data_crc), crc);
+				goto bail;
+			}
+		}
+		break;
+
+	case JFFS2_NODETYPE_DIRENT:
+		crc = crc32(0, node, sizeof(node->d)-8);
+		if (je32_to_cpu(node->d.node_crc) != crc) {
+			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			       ref_offset(raw), je32_to_cpu(node->d.node_crc), crc);
+			goto bail;
+		}
+
+		if (node->d.nsize) {
+			crc = crc32(0, node->d.name, node->d.nsize);
+			if (je32_to_cpu(node->d.name_crc) != crc) {
+				printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+				       ref_offset(raw), je32_to_cpu(node->d.name_crc), crc);
+				goto bail;
+			}
+		}
+		break;
+	default:
+		printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", 
+		       ref_offset(raw), je16_to_cpu(node->u.nodetype));
+		goto bail;
+	}
+
+	nraw = jffs2_alloc_raw_node_ref();
+	if (!nraw) {
+		ret = -ENOMEM;
+		goto out_node;
+	}
+
+	/* OK, all the CRCs are good; this node can just be copied as-is. */
+ retry:
+	nraw->flash_offset = phys_ofs;
+	nraw->__totlen = rawlen;
+	nraw->next_phys = NULL;
+
+	ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
+
+	if (ret || (retlen != rawlen)) {
+		printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
+                       rawlen, phys_ofs, ret, retlen);
+		if (retlen) {
+                        /* Doesn't belong to any inode */
+			nraw->next_in_ino = NULL;
+
+			nraw->flash_offset |= REF_OBSOLETE;
+			jffs2_add_physical_node_ref(c, nraw);
+			jffs2_mark_node_obsolete(c, nraw);
+		} else {
+			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", nraw->flash_offset);
+                        jffs2_free_raw_node_ref(nraw);
+		}
+		if (!retried && (nraw = jffs2_alloc_raw_node_ref())) {
+			/* Try to reallocate space and retry */
+			uint32_t dummy;
+			struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];
+
+			retried = 1;
+
+			D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));
+			
+			ACCT_SANITY_CHECK(c,jeb);
+			D1(ACCT_PARANOIA_CHECK(jeb));
+
+			ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy);
+
+			if (!ret) {
+				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));
+
+				ACCT_SANITY_CHECK(c,jeb);
+				D1(ACCT_PARANOIA_CHECK(jeb));
+
+				goto retry;
+			}
+			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_free_raw_node_ref(nraw);
+		}
+
+		jffs2_free_raw_node_ref(nraw);
+		if (!ret)
+			ret = -EIO;
+		goto out_node;
+	}
+	nraw->flash_offset |= REF_PRISTINE;
+	jffs2_add_physical_node_ref(c, nraw);
+
+	/* Link into per-inode list. This is safe because of the ic
+	   state being INO_STATE_GC. Note that if we're doing this
+	   for an inode which is in-core, the 'nraw' pointer is then
+	   going to be fetched from ic->nodes by our caller. */
+	spin_lock(&c->erase_completion_lock);
+        nraw->next_in_ino = ic->nodes;
+        ic->nodes = nraw;
+	spin_unlock(&c->erase_completion_lock);
+
+	jffs2_mark_node_obsolete(c, raw);
+	D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));
+
+ out_node:
+	kfree(node);
+	return ret;
+ bail:
+	ret = -EBADFD;
+	goto out_node;
+}
+
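+/* Rewrite the inode's metadata node with a fresh version number: the device
+   numbers for block/char devices, the target for symlinks, or just the bare
+   attributes otherwise. The old node is then marked obsolete. */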
+static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
+{
+	struct jffs2_full_dnode *new_fn;
+	struct jffs2_raw_inode ri;
+	jint16_t dev;
+	char *mdata = NULL;
+	int mdatalen = 0;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
+	    S_ISCHR(JFFS2_F_I_MODE(f)) ) {
+		/* For these, we don't actually need to read the old node */
+		/* FIXME: for minor or major > 255. */
+		dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | 
+			JFFS2_F_I_RDEV_MIN(f)));
+		mdata = (char *)&dev;
+		mdatalen = sizeof(dev);
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
+	} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
+		mdatalen = fn->size;
+		mdata = kmalloc(fn->size, GFP_KERNEL);
+		if (!mdata) {
+			printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
+			return -ENOMEM;
+		}
+		ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
+		if (ret) {
+			printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret);
+			kfree(mdata);
+			return ret;
+		}
+		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of symlink target\n", mdatalen));
+
+	}
+	
+	ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen);
+	if (ret) {
+		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
+		       sizeof(ri)+ mdatalen, ret);
+		goto out;
+	}
+	
+	memset(&ri, 0, sizeof(ri));
+	ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+	ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen);
+	ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+	ri.ino = cpu_to_je32(f->inocache->ino);
+	ri.version = cpu_to_je32(++f->highest_version);
+	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+	ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
+	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+	ri.offset = cpu_to_je32(0);
+	ri.csize = cpu_to_je32(mdatalen);
+	ri.dsize = cpu_to_je32(mdatalen);
+	ri.compr = JFFS2_COMPR_NONE;
+	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+	ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
+
+	new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, phys_ofs, ALLOC_GC);
+
+	if (IS_ERR(new_fn)) {
+		printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
+		ret = PTR_ERR(new_fn);
+		goto out;
+	}
+	jffs2_mark_node_obsolete(c, fn->raw);
+	jffs2_free_full_dnode(fn);
+	f->metadata = new_fn;
+ out:
+	if (S_ISLNK(JFFS2_F_I_MODE(f)))
+		kfree(mdata);
+	return ret;
+}
+
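+/* Write out a fresh copy of a still-live dirent with a new version number;
+   adding it to f->dents displaces and obsoletes the old entry. */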
+static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
+{
+	struct jffs2_full_dirent *new_fd;
+	struct jffs2_raw_dirent rd;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd.nsize = strlen(fd->name);
+	rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
+	rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd.pino = cpu_to_je32(f->inocache->ino);
+	rd.version = cpu_to_je32(++f->highest_version);
+	rd.ino = cpu_to_je32(fd->ino);
+	rd.mctime = cpu_to_je32(max(JFFS2_F_I_MTIME(f), JFFS2_F_I_CTIME(f)));
+	rd.type = fd->type;
+	rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
+	rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));
+	
+	ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen);
+	if (ret) {
+		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
+		       sizeof(rd)+rd.nsize, ret);
+		return ret;
+	}
+	new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, phys_ofs, ALLOC_GC);
+
+	if (IS_ERR(new_fd)) {
+		printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
+		return PTR_ERR(new_fd);
+	}
+	jffs2_add_fd_to_list(c, new_fd, &f->dents);
+	return 0;
+}
+
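+/* Work out whether a deletion dirent is still needed. On media where nodes
+   can't be marked obsolete in place (e.g. NAND), scan the inode's obsolete
+   nodes for an older dirent of the same name that this one still overrides;
+   if such a node exists in another eraseblock, write out a replacement
+   deletion dirent, otherwise just drop it and mark it obsolete. */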
+static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
+{
+	struct jffs2_full_dirent **fdp = &f->dents;
+	int found = 0;
+
+	/* On a medium where we can't actually mark nodes obsolete
+	   permanently, such as NAND flash, we need to work out
+	   whether this deletion dirent is still needed to actively
+	   delete a 'real' dirent with the same name that's still
+	   somewhere else on the flash. */
+	if (!jffs2_can_mark_obsolete(c)) {
+		struct jffs2_raw_dirent *rd;
+		struct jffs2_raw_node_ref *raw;
+		int ret;
+		size_t retlen;
+		int name_len = strlen(fd->name);
+		uint32_t name_crc = crc32(0, fd->name, name_len);
+		uint32_t rawlen = ref_totlen(c, jeb, fd->raw);
+
+		rd = kmalloc(rawlen, GFP_KERNEL);
+		if (!rd)
+			return -ENOMEM;
+
+		/* Prevent the erase code from nicking the obsolete node refs while
+		   we're looking at them. I really don't like this extra lock but
+		   can't see any alternative. Suggestions on a postcard to... */
+		down(&c->erase_free_sem);
+
+		for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
+
+			/* We only care about obsolete ones */
+			if (!(ref_obsolete(raw)))
+				continue;
+
+			/* Any dirent with the same name is going to have the same length... */
+			if (ref_totlen(c, NULL, raw) != rawlen)
+				continue;
+
+			/* Doesn't matter if there's one in the same erase block. We're going to 
+			   delete it too at the same time. */
+			if ((raw->flash_offset & ~(c->sector_size-1)) ==
+			    (fd->raw->flash_offset & ~(c->sector_size-1)))
+				continue;
+
+			D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));
+
+			/* This is an obsolete node belonging to the same directory, and it's of the right
+			   length. We need to take a closer look...*/
+			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
+			if (ret) {
+				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw));
+				/* If we can't read it, we don't need to continue to obsolete it. Continue */
+				continue;
+			}
+			if (retlen != rawlen) {
+				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
+				       retlen, rawlen, ref_offset(raw));
+				continue;
+			}
+
+			if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
+				continue;
+
+			/* If the name CRC doesn't match, skip */
+			if (je32_to_cpu(rd->name_crc) != name_crc)
+				continue;
+
+			/* If the name length doesn't match, or it's another deletion dirent, skip */
+			if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
+				continue;
+
+			/* OK, check the actual name now */
+			if (memcmp(rd->name, fd->name, name_len))
+				continue;
+
+			/* OK. The name really does match. There really is still an older node on
+			   the flash which our deletion dirent obsoletes. So we have to write out
+			   a new deletion dirent to replace it */
+			up(&c->erase_free_sem);
+
+			D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
+				  ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
+			kfree(rd);
+
+			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
+		}
+
+		up(&c->erase_free_sem);
+		kfree(rd);
+	}
+
+	/* No need for it any more. Just mark it obsolete and remove it from the list */
+	while (*fdp) {
+		if ((*fdp) == fd) {
+			found = 1;
+			*fdp = fd->next;
+			break;
+		}
+		fdp = &(*fdp)->next;
+	}
+	if (!found) {
+		printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino);
+	}
+	jffs2_mark_node_obsolete(c, fd->raw);
+	jffs2_free_full_dirent(fd);
+	return 0;
+}
+
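+/* Replace a hole node. If the original covered more than one frag it has
+   been partially overwritten, so the replacement keeps the _same_ version
+   number; otherwise a new hole node is written with a fresh version. */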
+static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+				      uint32_t start, uint32_t end)
+{
+	struct jffs2_raw_inode ri;
+	struct jffs2_node_frag *frag;
+	struct jffs2_full_dnode *new_fn;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
+		  f->inocache->ino, start, end));
+	
+	memset(&ri, 0, sizeof(ri));
+
+	if(fn->frags > 1) {
+		size_t readlen;
+		uint32_t crc;
+		/* It's partially obsoleted by a later write. So we have to 
+		   write it out again with the _same_ version as before */
+		ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
+		if (readlen != sizeof(ri) || ret) {
+			printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen);
+			goto fill;
+		}
+		if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
+			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
+			       ref_offset(fn->raw),
+			       je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
+			return -EIO;
+		}
+		if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
+			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
+			       ref_offset(fn->raw),
+			       je32_to_cpu(ri.totlen), sizeof(ri));
+			return -EIO;
+		}
+		crc = crc32(0, &ri, sizeof(ri)-8);
+		if (crc != je32_to_cpu(ri.node_crc)) {
+			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
+			       ref_offset(fn->raw), 
+			       je32_to_cpu(ri.node_crc), crc);
+			/* FIXME: We could possibly deal with this by writing new holes for each frag */
+			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", 
+			       start, end, f->inocache->ino);
+			goto fill;
+		}
+		if (ri.compr != JFFS2_COMPR_ZERO) {
+			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
+			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", 
+			       start, end, f->inocache->ino);
+			goto fill;
+		}
+	} else {
+	fill:
+		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+		ri.totlen = cpu_to_je32(sizeof(ri));
+		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+		ri.ino = cpu_to_je32(f->inocache->ino);
+		ri.version = cpu_to_je32(++f->highest_version);
+		ri.offset = cpu_to_je32(start);
+		ri.dsize = cpu_to_je32(end - start);
+		ri.csize = cpu_to_je32(0);
+		ri.compr = JFFS2_COMPR_ZERO;
+	}
+	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+	ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
+	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+	ri.data_crc = cpu_to_je32(0);
+	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+
+	ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen);
+	if (ret) {
+		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
+		       sizeof(ri), ret);
+		return ret;
+	}
+	new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_GC);
+
+	if (IS_ERR(new_fn)) {
+		printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
+		return PTR_ERR(new_fn);
+	}
+	if (je32_to_cpu(ri.version) == f->highest_version) {
+		jffs2_add_full_dnode_to_inode(c, f, new_fn);
+		if (f->metadata) {
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+			jffs2_free_full_dnode(f->metadata);
+			f->metadata = NULL;
+		}
+		return 0;
+	}
+
+	/* 
+	 * We should only get here in the case where the node we are
+	 * replacing had more than one frag, so we kept the same version
+	 * number as before. (Except in case of error -- see 'goto fill;' 
+	 * above.)
+	 */
+	D1(if(unlikely(fn->frags <= 1)) {
+		printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
+		       fn->frags, je32_to_cpu(ri.version), f->highest_version,
+		       je32_to_cpu(ri.ino));
+	});
+
+	/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
+	mark_ref_normal(new_fn->raw);
+
+	for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); 
+	     frag; frag = frag_next(frag)) {
+		if (frag->ofs > fn->size + fn->ofs)
+			break;
+		if (frag->node == fn) {
+			frag->node = new_fn;
+			new_fn->frags++;
+			fn->frags--;
+		}
+	}
+	if (fn->frags) {
+		printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n");
+		BUG();
+	}
+	if (!new_fn->frags) {
+		printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
+		BUG();
+	}
+		
+	jffs2_mark_node_obsolete(c, fn->raw);
+	jffs2_free_full_dnode(fn);
+	
+	return 0;
+}
+
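+/* GC a data node: read the affected page through the page cache, optionally
+   expand the range to cover logically adjacent frags in the same page when
+   there are blocks to spare, then recompress and write the data back out as
+   one or more new nodes. */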
+static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+				       uint32_t start, uint32_t end)
+{
+	struct jffs2_full_dnode *new_fn;
+	struct jffs2_raw_inode ri;
+	uint32_t alloclen, phys_ofs, offset, orig_end, orig_start;	
+	int ret = 0;
+	unsigned char *comprbuf = NULL, *writebuf;
+	unsigned long pg;
+	unsigned char *pg_ptr;
+ 
+	memset(&ri, 0, sizeof(ri));
+
+	D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
+		  f->inocache->ino, start, end));
+
+	orig_end = end;
+	orig_start = start;
+
+	if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
+		/* Attempt to do some merging. But only expand to cover logically
+		   adjacent frags if the block containing them is already considered
+		   to be dirty. Otherwise we end up with GC just going round in 
+		   circles dirtying the nodes it already wrote out, especially 
+		   on NAND where we have small eraseblocks and hence a much higher
+		   chance of nodes having to be split to cross boundaries. */
+
+		struct jffs2_node_frag *frag;
+		uint32_t min, max;
+
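+		/* min/max delimit the page containing 'start'; the merge loops
+		   below never expand the range beyond that single page. */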
+		min = start & ~(PAGE_CACHE_SIZE-1);
+		max = min + PAGE_CACHE_SIZE;
+
+		frag = jffs2_lookup_node_frag(&f->fragtree, start);
+
+		/* BUG_ON(!frag) but that'll happen anyway... */
+
+		BUG_ON(frag->ofs != start);
+
+		/* First grow down... */
+		while((frag = frag_prev(frag)) && frag->ofs >= min) {
+
+			/* If the previous frag doesn't even reach the beginning, there's
+			   excessive fragmentation. Just merge. */
+			if (frag->ofs > min) {
+				D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
+					  frag->ofs, frag->ofs+frag->size));
+				start = frag->ofs;
+				continue;
+			}
+			/* OK. This frag holds the first byte of the page. */
+			if (!frag->node || !frag->node->raw) {
+				D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
+					  frag->ofs, frag->ofs+frag->size));
+				break;
+			} else {
+
+				/* OK, it's a frag which extends to the beginning of the page. Does it live 
+				   in a block which is still considered clean? If so, don't obsolete it.
+				   If not, cover it anyway. */
+
+				struct jffs2_raw_node_ref *raw = frag->node->raw;
+				struct jffs2_eraseblock *jeb;
+
+				jeb = &c->blocks[raw->flash_offset / c->sector_size];
+
+				if (jeb == c->gcblock) {
+					D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+					start = frag->ofs;
+					break;
+				}
+				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
+					D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
+						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+					break;
+				}
+
+				D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
+						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+				start = frag->ofs;
+				break;
+			}
+		}
+
+		/* ... then up */
+
+		/* Find last frag which is actually part of the node we're to GC. */
+		frag = jffs2_lookup_node_frag(&f->fragtree, end-1);
+
+		while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {
+
+			/* If the next frag doesn't even reach the end of the page, there's
+			   lots of fragmentation. Just merge. */
+			if (frag->ofs+frag->size < max) {
+				D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
+					  frag->ofs, frag->ofs+frag->size));
+				end = frag->ofs + frag->size;
+				continue;
+			}
+
+			if (!frag->node || !frag->node->raw) {
+				D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
+					  frag->ofs, frag->ofs+frag->size));
+				break;
+			} else {
+
+				/* OK, it's a frag which extends to the end of the page. Does it live
+				   in a block which is still considered clean? If so, don't obsolete it.
+				   If not, cover it anyway. */
+
+				struct jffs2_raw_node_ref *raw = frag->node->raw;
+				struct jffs2_eraseblock *jeb;
+
+				jeb = &c->blocks[raw->flash_offset / c->sector_size];
+
+				if (jeb == c->gcblock) {
+					D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+					end = frag->ofs + frag->size;
+					break;
+				}
+				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
+					D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
+						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+					break;
+				}
+
+				D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
+						  frag->ofs, frag->ofs+frag->size, jeb->offset));
+				end = frag->ofs + frag->size;
+				break;
+			}
+		}
+		D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", 
+			  orig_start, orig_end, start, end));
+
+		BUG_ON(end > JFFS2_F_I_SIZE(f));
+		BUG_ON(end < orig_end);
+		BUG_ON(start > orig_start);
+	}
+	
+	/* First, use readpage() to read the appropriate page into the page cache */
+	/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
+	 *    triggered garbage collection in the first place?
+	 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
+	 *    page OK. We'll actually write it out again in commit_write, which is a little
+	 *    suboptimal, but at least we're correct.
+	 */
+	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
+
+	if (IS_ERR(pg_ptr)) {
+		printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
+		return PTR_ERR(pg_ptr);
+	}
+
+	offset = start;
+	while(offset < orig_end) {
+		uint32_t datalen;
+		uint32_t cdatalen;
+		uint16_t comprtype = JFFS2_COMPR_NONE;
+
+		ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen);
+
+		if (ret) {
+			printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
+			       sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret);
+			break;
+		}
+		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
+		datalen = end - offset;
+
+		writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
+
+		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
+
+		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+		ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
+		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+		ri.ino = cpu_to_je32(f->inocache->ino);
+		ri.version = cpu_to_je32(++f->highest_version);
+		ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+		ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+		ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+		ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
+		ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+		ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+		ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+		ri.offset = cpu_to_je32(offset);
+		ri.csize = cpu_to_je32(cdatalen);
+		ri.dsize = cpu_to_je32(datalen);
+		ri.compr = comprtype & 0xff;
+		ri.usercompr = (comprtype >> 8) & 0xff;
+		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+		ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
+	
+		new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC);
+
+		jffs2_free_comprbuf(comprbuf, writebuf);
+
+		if (IS_ERR(new_fn)) {
+			printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
+			ret = PTR_ERR(new_fn);
+			break;
+		}
+		ret = jffs2_add_full_dnode_to_inode(c, f, new_fn);
+		offset += datalen;
+		if (f->metadata) {
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+			jffs2_free_full_dnode(f->metadata);
+			f->metadata = NULL;
+		}
+	}
+
+	jffs2_gc_release_page(c, pg_ptr, &pg);
+	return ret;
+}
+
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h
new file mode 100644
index 000000000000..84f184f0836f
--- /dev/null
+++ b/fs/jffs2/histo.h
@@ -0,0 +1,3 @@
+/* This file provides the bit-probabilities for the input file */
+#define BIT_DIVIDER 629 
+static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */
diff --git a/fs/jffs2/histo_mips.h b/fs/jffs2/histo_mips.h
new file mode 100644
index 000000000000..9a443268d885
--- /dev/null
+++ b/fs/jffs2/histo_mips.h
@@ -0,0 +1,2 @@
+#define BIT_DIVIDER_MIPS 1043 
+static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */
diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c
new file mode 100644
index 000000000000..238c7992064c
--- /dev/null
+++ b/fs/jffs2/ioctl.c
@@ -0,0 +1,23 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: ioctl.c,v 1.9 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/fs.h>
+
+int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 
+		unsigned long arg)
+{
+	/* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which
+	   will include compression support etc. */
+	return -ENOTTY;
+}
+	
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
new file mode 100644
index 000000000000..5abb431c2a00
--- /dev/null
+++ b/fs/jffs2/malloc.c
@@ -0,0 +1,205 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: malloc.c,v 1.28 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/jffs2.h>
+#include "nodelist.h"
+
+#if 0
+#define JFFS2_SLAB_POISON SLAB_POISON
+#else
+#define JFFS2_SLAB_POISON 0
+#endif
+
+// replace this by #define D3(x) x for cache debugging
+#define D3(x)
+
+/* These are initialised to NULL in the kernel startup code.
+   If you're porting to other operating systems, beware */
+static kmem_cache_t *full_dnode_slab;
+static kmem_cache_t *raw_dirent_slab;
+static kmem_cache_t *raw_inode_slab;
+static kmem_cache_t *tmp_dnode_info_slab;
+static kmem_cache_t *raw_node_ref_slab;
+static kmem_cache_t *node_frag_slab;
+static kmem_cache_t *inode_cache_slab;
+
+int __init jffs2_create_slab_caches(void)
+{
+	full_dnode_slab = kmem_cache_create("jffs2_full_dnode", 
+					    sizeof(struct jffs2_full_dnode),
+					    0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!full_dnode_slab)
+		goto err;
+
+	raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
+					    sizeof(struct jffs2_raw_dirent),
+					    0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!raw_dirent_slab)
+		goto err;
+
+	raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
+					   sizeof(struct jffs2_raw_inode),
+					   0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!raw_inode_slab)
+		goto err;
+
+	tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
+						sizeof(struct jffs2_tmp_dnode_info),
+						0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!tmp_dnode_info_slab)
+		goto err;
+
+	raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref",
+					      sizeof(struct jffs2_raw_node_ref),
+					      0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!raw_node_ref_slab)
+		goto err;
+
+	node_frag_slab = kmem_cache_create("jffs2_node_frag",
+					   sizeof(struct jffs2_node_frag),
+					   0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (!node_frag_slab)
+		goto err;
+
+	inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
+					     sizeof(struct jffs2_inode_cache),
+					     0, JFFS2_SLAB_POISON, NULL, NULL);
+	if (inode_cache_slab)
+		return 0;
+ err:
+	jffs2_destroy_slab_caches();
+	return -ENOMEM;
+}
+
+void jffs2_destroy_slab_caches(void)
+{
+	if(full_dnode_slab)
+		kmem_cache_destroy(full_dnode_slab);
+	if(raw_dirent_slab)
+		kmem_cache_destroy(raw_dirent_slab);
+	if(raw_inode_slab)
+		kmem_cache_destroy(raw_inode_slab);
+	if(tmp_dnode_info_slab)
+		kmem_cache_destroy(tmp_dnode_info_slab);
+	if(raw_node_ref_slab)
+		kmem_cache_destroy(raw_node_ref_slab);
+	if(node_frag_slab)
+		kmem_cache_destroy(node_frag_slab);
+	if(inode_cache_slab)
+		kmem_cache_destroy(inode_cache_slab);
+}
+
+struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
+{
+	return kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
+}
+
+void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
+{
+	kfree(x);
+}
+
+struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
+{
+	struct jffs2_full_dnode *ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_full_dnode at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
+{
+	D3 (printk (KERN_DEBUG "free full_dnode at %p\n", x));
+	kmem_cache_free(full_dnode_slab, x);
+}
+
+struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
+{
+	struct jffs2_raw_dirent *ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_raw_dirent at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
+{
+	D3 (printk (KERN_DEBUG "free_raw_dirent at %p\n", x));
+	kmem_cache_free(raw_dirent_slab, x);
+}
+
+struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
+{
+	struct jffs2_raw_inode *ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_raw_inode at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
+{
+	D3 (printk (KERN_DEBUG "free_raw_inode at %p\n", x));
+	kmem_cache_free(raw_inode_slab, x);
+}
+
+struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
+{
+	struct jffs2_tmp_dnode_info *ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_tmp_dnode_info at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
+{
+	D3 (printk (KERN_DEBUG "free_tmp_dnode_info at %p\n", x));
+	kmem_cache_free(tmp_dnode_info_slab, x);
+}
+
+struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void)
+{
+	struct jffs2_raw_node_ref *ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_raw_node_ref at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x)
+{
+	D3 (printk (KERN_DEBUG "free_raw_node_ref at %p\n", x));
+	kmem_cache_free(raw_node_ref_slab, x);
+}
+
+struct jffs2_node_frag *jffs2_alloc_node_frag(void)
+{
+	struct jffs2_node_frag *ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
+	D3 (printk (KERN_DEBUG "alloc_node_frag at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_node_frag(struct jffs2_node_frag *x)
+{
+	D3 (printk (KERN_DEBUG "free_node_frag at %p\n", x));
+	kmem_cache_free(node_frag_slab, x);
+}
+
+struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
+{
+	struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
+	D3 (printk(KERN_DEBUG "Allocated inocache at %p\n", ret));
+	return ret;
+}
+
+void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
+{
+	D3 (printk(KERN_DEBUG "Freeing inocache at %p\n", x));
+	kmem_cache_free(inode_cache_slab, x);
+}
+
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
new file mode 100644
index 000000000000..cd6a8bd13e0b
--- /dev/null
+++ b/fs/jffs2/nodelist.c
@@ -0,0 +1,681 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: nodelist.c,v 1.90 2004/12/08 17:59:20 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mtd/mtd.h>
+#include <linux/rbtree.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include "nodelist.h"
+
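+/* Insert a full_dirent into a list kept sorted by name hash. If an entry
+   with the same name already exists, keep whichever has the higher version
+   and mark the other one's raw node obsolete. */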
+void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
+{
+	struct jffs2_full_dirent **prev = list;
+	D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list));
+
+	while ((*prev) && (*prev)->nhash <= new->nhash) {
+		if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
+			/* Duplicate. Free one */
+			if (new->version < (*prev)->version) {
+				D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n"));
+				D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino));
+				jffs2_mark_node_obsolete(c, new->raw);
+				jffs2_free_full_dirent(new);
+			} else {
+				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino));
+				new->next = (*prev)->next;
+				jffs2_mark_node_obsolete(c, ((*prev)->raw));
+				jffs2_free_full_dirent(*prev);
+				*prev = new;
+			}
+			goto out;
+		}
+		prev = &((*prev)->next);
+	}
+	new->next = *prev;
+	*prev = new;
+
+ out:
+	D2(while(*list) {
+		printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u)\n", (*list)->name, (*list)->nhash, (*list)->ino);
+		list = &(*list)->next;
+	});
+}
+
+/* Put a new tmp_dnode_info into the list, keeping the list in 
+   order of increasing version
+*/
+static void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct jffs2_tmp_dnode_info **list)
+{
+	struct jffs2_tmp_dnode_info **prev = list;
+	
+	while ((*prev) && (*prev)->version < tn->version) {
+		prev = &((*prev)->next);
+	}
+	tn->next = (*prev);
+        *prev = tn;
+}
+
+static void jffs2_free_tmp_dnode_info_list(struct jffs2_tmp_dnode_info *tn)
+{
+	struct jffs2_tmp_dnode_info *next;
+
+	while (tn) {
+		next = tn;
+		tn = tn->next;
+		jffs2_free_full_dnode(next->fn);
+		jffs2_free_tmp_dnode_info(next);
+	}
+}
+
+static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
+{
+	struct jffs2_full_dirent *next;
+
+	while (fd) {
+		next = fd->next;
+		jffs2_free_full_dirent(fd);
+		fd = next;
+	}
+}
+
+/* Returns first valid node after 'ref'. May return 'ref' */
+static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
+{
+	while (ref && ref->next_in_ino) {
+		if (!ref_obsolete(ref))
+			return ref;
+		D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)));
+		ref = ref->next_in_ino;
+	}
+	return NULL;
+}
+
+/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
+   with this ino, returning the former in order of version */
+
+int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			  struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp,
+			  uint32_t *highest_version, uint32_t *latest_mctime,
+			  uint32_t *mctime_ver)
+{
+	struct jffs2_raw_node_ref *ref, *valid_ref;
+	struct jffs2_tmp_dnode_info *tn, *ret_tn = NULL;
+	struct jffs2_full_dirent *fd, *ret_fd = NULL;
+	union jffs2_node_union node;
+	size_t retlen;
+	int err;
+
+	*mctime_ver = 0;
+	
+	D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino));
+
+	spin_lock(&c->erase_completion_lock);
+
+	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
+
+	if (!valid_ref)
+		printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino);
+
+	while (valid_ref) {
+		/* We can hold a pointer to a non-obsolete node without the spinlock,
+		   but _obsolete_ nodes may disappear at any time, if the block
+		   they're in gets erased. So if we mark 'ref' obsolete while we're
+		   not holding the lock, it can go away immediately. For that reason,
+		   we find the next valid node first, before processing 'ref'.
+		*/
+		ref = valid_ref;
+		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
+		spin_unlock(&c->erase_completion_lock);
+
+		cond_resched();
+
+		/* FIXME: point() */
+		err = jffs2_flash_read(c, (ref_offset(ref)), 
+				       min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
+				       &retlen, (void *)&node);
+		if (err) {
+			printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
+			goto free_out;
+		}
+
+		/* Check we've managed to read at least the common node header */
+		if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) {
+			printk(KERN_WARNING "short read in get_inode_nodes()\n");
+			err = -EIO;
+			goto free_out;
+		}
+			
+		switch (je16_to_cpu(node.u.nodetype)) {
+		case JFFS2_NODETYPE_DIRENT:
+			D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref)));
+			if (ref_flags(ref) == REF_UNCHECKED) {
+				printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref));
+				BUG();
+			}
+			if (retlen < sizeof(node.d)) {
+				printk(KERN_WARNING "short read in get_inode_nodes()\n");
+				err = -EIO;
+				goto free_out;
+			}
+			/* sanity check */
+			if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) {
+				printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n",
+				       ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen));
+				jffs2_mark_node_obsolete(c, ref);
+				spin_lock(&c->erase_completion_lock);
+				continue;
+			}
+			if (je32_to_cpu(node.d.version) > *highest_version)
+				*highest_version = je32_to_cpu(node.d.version);
+			if (ref_obsolete(ref)) {
+				/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
+				printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n",
+				       ref_offset(ref));
+				BUG();
+			}
+			
+			fd = jffs2_alloc_full_dirent(node.d.nsize+1);
+			if (!fd) {
+				err = -ENOMEM;
+				goto free_out;
+			}
+			fd->raw = ref;
+			fd->version = je32_to_cpu(node.d.version);
+			fd->ino = je32_to_cpu(node.d.ino);
+			fd->type = node.d.type;
+
+			/* Pick out the mctime of the latest dirent */
+			if(fd->version > *mctime_ver) {
+				*mctime_ver = fd->version;
+				*latest_mctime = je32_to_cpu(node.d.mctime);
+			}
+
+			/* memcpy as much of the name as possible from the raw
+			   dirent we've already read from the flash
+			*/
+			if (retlen > sizeof(struct jffs2_raw_dirent))
+				memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent))));
+				
+			/* Do we need to copy any more of the name directly
+			   from the flash?
+			*/
+			if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) {
+				/* FIXME: point() */
+				int already = retlen - sizeof(struct jffs2_raw_dirent);
+					
+				err = jffs2_flash_read(c, (ref_offset(ref)) + retlen, 
+						   node.d.nsize - already, &retlen, &fd->name[already]);
+				if (!err && retlen != node.d.nsize - already)
+					err = -EIO;
+					
+				if (err) {
+					printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err);
+					jffs2_free_full_dirent(fd);
+					goto free_out;
+				}
+			}
+			fd->nhash = full_name_hash(fd->name, node.d.nsize);
+			fd->next = NULL;
+			fd->name[node.d.nsize] = '\0';
+			/* Wheee. We now have a complete jffs2_full_dirent structure, with
+			   the name in it and everything. Link it into the list */
+			D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino));
+			jffs2_add_fd_to_list(c, fd, &ret_fd);
+			break;
+
+		case JFFS2_NODETYPE_INODE:
+			D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref)));
+			if (retlen < sizeof(node.i)) {
+				printk(KERN_WARNING "read too short for dnode\n");
+				err = -EIO;
+				goto free_out;
+			}
+			if (je32_to_cpu(node.i.version) > *highest_version)
+				*highest_version = je32_to_cpu(node.i.version);
+			D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version));
+
+			if (ref_obsolete(ref)) {
+				/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
+				printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n",
+				       ref_offset(ref));
+				BUG();
+			}
+
+			/* If we've never checked the CRCs on this node, check them now. */
+			if (ref_flags(ref) == REF_UNCHECKED) {
+				uint32_t crc, len;
+				struct jffs2_eraseblock *jeb;
+
+				crc = crc32(0, &node, sizeof(node.i)-8);
+				if (crc != je32_to_cpu(node.i.node_crc)) {
+					printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+					       ref_offset(ref), je32_to_cpu(node.i.node_crc), crc);
+					jffs2_mark_node_obsolete(c, ref);
+					spin_lock(&c->erase_completion_lock);
+					continue;
+				}
+				
+				/* sanity checks */
+				if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) ||
+				     PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) {
+					printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino  %d, version %d, isize %d, csize %d, dsize %d \n",
+						ref_offset(ref),  je32_to_cpu(node.i.totlen),  je32_to_cpu(node.i.ino),
+						je32_to_cpu(node.i.version),  je32_to_cpu(node.i.isize), 
+						je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize));
+					jffs2_mark_node_obsolete(c, ref);
+					spin_lock(&c->erase_completion_lock);
+					continue;
+				}
+
+				if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) {
+					unsigned char *buf=NULL;
+					uint32_t pointed = 0;
+#ifndef __ECOS
+					if (c->mtd->point) {
+						err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
+								     &retlen, &buf);
+						if (!err && retlen < je32_to_cpu(node.i.csize)) {
+							D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
+							c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
+						} else if (err){
+							D1(printk(KERN_DEBUG "MTD point failed %d\n", err));
+						} else
+							pointed = 1; /* successfully pointed to device */
+					}
+#endif					
+					if(!pointed){
+						buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL);
+						if (!buf)
+							return -ENOMEM;
+						
+						err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
+								       &retlen, buf);
+						if (!err && retlen != je32_to_cpu(node.i.csize))
+							err = -EIO;
+						if (err) {
+							kfree(buf);
+							return err;
+						}
+					}
+					crc = crc32(0, buf, je32_to_cpu(node.i.csize));
+					if(!pointed)
+						kfree(buf);
+#ifndef __ECOS
+					else
+						c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
+#endif
+
+					if (crc != je32_to_cpu(node.i.data_crc)) {
+						printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+						       ref_offset(ref), je32_to_cpu(node.i.data_crc), crc);
+						jffs2_mark_node_obsolete(c, ref);
+						spin_lock(&c->erase_completion_lock);
+						continue;
+					}
+					
+				}
+
+				/* Mark the node as having been checked and fix the accounting accordingly */
+				spin_lock(&c->erase_completion_lock);
+				jeb = &c->blocks[ref->flash_offset / c->sector_size];
+				len = ref_totlen(c, jeb, ref);
+
+				jeb->used_size += len;
+				jeb->unchecked_size -= len;
+				c->used_size += len;
+				c->unchecked_size -= len;
+
+				/* If node covers at least a whole page, or if it starts at the 
+				   beginning of a page and runs to the end of the file, or if 
+				   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. 
+
+				   If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) 
+				   when the overlapping node(s) get added to the tree anyway. 
+				*/
+				if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) ||
+				    ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) &&
+				      (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) ==  je32_to_cpu(node.i.isize)))) {
+					D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref)));
+					ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
+				} else {
+					D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref)));
+					ref->flash_offset = ref_offset(ref) | REF_NORMAL;
+				}
+				spin_unlock(&c->erase_completion_lock);
+			}
+
+			tn = jffs2_alloc_tmp_dnode_info();
+			if (!tn) {
+				D1(printk(KERN_DEBUG "alloc tn failed\n"));
+				err = -ENOMEM;
+				goto free_out;
+			}
+
+			tn->fn = jffs2_alloc_full_dnode();
+			if (!tn->fn) {
+				D1(printk(KERN_DEBUG "alloc fn failed\n"));
+				err = -ENOMEM;
+				jffs2_free_tmp_dnode_info(tn);
+				goto free_out;
+			}
+			tn->version = je32_to_cpu(node.i.version);
+			tn->fn->ofs = je32_to_cpu(node.i.offset);
+			/* There was a bug where we wrote hole nodes out with
+			   csize/dsize swapped. Deal with it */
+			if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize))
+				tn->fn->size = je32_to_cpu(node.i.csize);
+			else // normal case...
+				tn->fn->size = je32_to_cpu(node.i.dsize);
+			tn->fn->raw = ref;
+			D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n",
+				  ref_offset(ref), je32_to_cpu(node.i.version),
+				  je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize)));
+			jffs2_add_tn_to_list(tn, &ret_tn);
+			break;
+
+		default:
+			if (ref_flags(ref) == REF_UNCHECKED) {
+				struct jffs2_eraseblock *jeb;
+				uint32_t len;
+
+				printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n",
+				       je16_to_cpu(node.u.nodetype), ref_offset(ref));
+
+				/* Mark the node as having been checked and fix the accounting accordingly */
+				spin_lock(&c->erase_completion_lock);
+				jeb = &c->blocks[ref->flash_offset / c->sector_size];
+				len = ref_totlen(c, jeb, ref);
+
+				jeb->used_size += len;
+				jeb->unchecked_size -= len;
+				c->used_size += len;
+				c->unchecked_size -= len;
+
+				mark_ref_normal(ref);
+				spin_unlock(&c->erase_completion_lock);
+			}
+			node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype));
+			if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) {
+				/* Hmmm. This should have been caught at scan time. */
+				printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n",
+				       ref_offset(ref));
+				printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n", 
+				       je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen),
+				       je32_to_cpu(node.u.hdr_crc));
+				jffs2_mark_node_obsolete(c, ref);
+			} else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) {
+			case JFFS2_FEATURE_INCOMPAT:
+				printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
+				/* EEP */
+				BUG();
+				break;
+			case JFFS2_FEATURE_ROCOMPAT:
+				printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
+				if (!(c->flags & JFFS2_SB_FLAG_RO))
+					BUG();
+				break;
+			case JFFS2_FEATURE_RWCOMPAT_COPY:
+				printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
+				break;
+			case JFFS2_FEATURE_RWCOMPAT_DELETE:
+				printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
+				jffs2_mark_node_obsolete(c, ref);
+				break;
+			}
+
+		}
+		spin_lock(&c->erase_completion_lock);
+
+	}
+	spin_unlock(&c->erase_completion_lock);
+	*tnp = ret_tn;
+	*fdp = ret_fd;
+
+	return 0;
+
+ free_out:
+	jffs2_free_tmp_dnode_info_list(ret_tn);
+	jffs2_free_full_dirent_list(ret_fd);
+	return err;
+}
+
+void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
+{
+	spin_lock(&c->inocache_lock);
+	ic->state = state;
+	wake_up(&c->inocache_wq);
+	spin_unlock(&c->inocache_lock);
+}
+
+/* During mount, this needs no locking. During normal operation, its
+   callers want to do other stuff while still holding the inocache_lock.
+   Rather than introducing special case get_ino_cache functions or 
+   callbacks, we just let the caller do the locking itself. */
+   
+struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
+{
+	struct jffs2_inode_cache *ret;
+
+	D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino));
+
+	ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
+	while (ret && ret->ino < ino) {
+		ret = ret->next;
+	}
+	
+	if (ret && ret->ino != ino)
+		ret = NULL;
+
+	D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino));
+	return ret;
+}
+
+void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
+{
+	struct jffs2_inode_cache **prev;
+	D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino));
+	spin_lock(&c->inocache_lock);
+	
+	prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];
+
+	while ((*prev) && (*prev)->ino < new->ino) {
+		prev = &(*prev)->next;
+	}
+	new->next = *prev;
+	*prev = new;
+
+	spin_unlock(&c->inocache_lock);
+}
+
+void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
+{
+	struct jffs2_inode_cache **prev;
+	D2(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino));
+	spin_lock(&c->inocache_lock);
+	
+	prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];
+	
+	while ((*prev) && (*prev)->ino < old->ino) {
+		prev = &(*prev)->next;
+	}
+	if ((*prev) == old) {
+		*prev = old->next;
+	}
+
+	spin_unlock(&c->inocache_lock);
+}
+
+void jffs2_free_ino_caches(struct jffs2_sb_info *c)
+{
+	int i;
+	struct jffs2_inode_cache *this, *next;
+	
+	for (i=0; i<INOCACHE_HASHSIZE; i++) {
+		this = c->inocache_list[i];
+		while (this) {
+			next = this->next;
+			D2(printk(KERN_DEBUG "jffs2_free_ino_caches: Freeing ino #%u at %p\n", this->ino, this));
+			jffs2_free_inode_cache(this);
+			this = next;
+		}
+		c->inocache_list[i] = NULL;
+	}
+}
+
+void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
+{
+	int i;
+	struct jffs2_raw_node_ref *this, *next;
+
+	for (i=0; i<c->nr_blocks; i++) {
+		this = c->blocks[i].first_node;
+		while(this) {
+			next = this->next_phys;
+			jffs2_free_raw_node_ref(this);
+			this = next;
+		}
+		c->blocks[i].first_node = c->blocks[i].last_node = NULL;
+	}
+}
+	
+struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
+{
+	/* The common case in lookup is that there will be a node 
+	   which precisely matches. So we go looking for that first */
+	struct rb_node *next;
+	struct jffs2_node_frag *prev = NULL;
+	struct jffs2_node_frag *frag = NULL;
+
+	D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset));
+
+	next = fragtree->rb_node;
+
+	while(next) {
+		frag = rb_entry(next, struct jffs2_node_frag, rb);
+
+		D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n",
+			  frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right));
+		if (frag->ofs + frag->size <= offset) {
+			D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n",
+				  frag->ofs, frag->ofs+frag->size));
+			/* Remember the closest smaller match on the way down */
+			if (!prev || frag->ofs > prev->ofs)
+				prev = frag;
+			next = frag->rb.rb_right;
+		} else if (frag->ofs > offset) {
+			D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n",
+				  frag->ofs, frag->ofs+frag->size));
+			next = frag->rb.rb_left;
+		} else {
+			D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n",
+				  frag->ofs, frag->ofs+frag->size));
+			return frag;
+		}
+	}
+
+	/* Exact match not found. Go back up looking at each parent,
+	   and return the closest smaller one */
+
+	if (prev)
+		D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n",
+			  prev->ofs, prev->ofs+prev->size));
+	else 
+		D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n"));
+	
+	return prev;
+}
+
+/* Pass 'c' argument to indicate that nodes should be marked obsolete as
+   they're killed. */
+void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
+{
+	struct jffs2_node_frag *frag;
+	struct jffs2_node_frag *parent;
+
+	if (!root->rb_node)
+		return;
+
+	frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
+
+	while(frag) {
+		if (frag->rb.rb_left) {
+			D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n", 
+				  frag, frag->ofs, frag->ofs+frag->size));
+			frag = frag_left(frag);
+			continue;
+		}
+		if (frag->rb.rb_right) {
+			D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n", 
+				  frag, frag->ofs, frag->ofs+frag->size));
+			frag = frag_right(frag);
+			continue;
+		}
+
+		D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n",
+			  frag->ofs, frag->ofs+frag->size, frag->node,
+			  frag->node?frag->node->frags:0));
+			
+		if (frag->node && !(--frag->node->frags)) {
+			/* Not a hole, and it's the final remaining frag 
+			   of this node. Free the node */
+			if (c)
+				jffs2_mark_node_obsolete(c, frag->node->raw);
+			
+			jffs2_free_full_dnode(frag->node);
+		}
+		parent = frag_parent(frag);
+		if (parent) {
+			if (frag_left(parent) == frag)
+				parent->rb.rb_left = NULL;
+			else 
+				parent->rb.rb_right = NULL;
+		}
+
+		jffs2_free_node_frag(frag);
+		frag = parent;
+
+		cond_resched();
+	}
+}
+
+void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
+{
+	struct rb_node *parent = &base->rb;
+	struct rb_node **link = &parent;
+
+	D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag, 
+		  newfrag->ofs, newfrag->ofs+newfrag->size, base));
+
+	while (*link) {
+		parent = *link;
+		base = rb_entry(parent, struct jffs2_node_frag, rb);
+	
+		D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
+		if (newfrag->ofs > base->ofs)
+			link = &base->rb.rb_right;
+		else if (newfrag->ofs < base->ofs)
+			link = &base->rb.rb_left;
+		else {
+			printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
+			BUG();
+		}
+	}
+
+	rb_link_node(&newfrag->rb, &base->rb, link);
+}
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
new file mode 100644
index 000000000000..a4864d05ea92
--- /dev/null
+++ b/fs/jffs2/nodelist.h
@@ -0,0 +1,473 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: nodelist.h,v 1.126 2004/11/19 15:06:29 dedekind Exp $
+ *
+ */
+
+#ifndef __JFFS2_NODELIST_H__
+#define __JFFS2_NODELIST_H__
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/jffs2.h>
+#include <linux/jffs2_fs_sb.h>
+#include <linux/jffs2_fs_i.h>
+
+#ifdef __ECOS
+#include "os-ecos.h"
+#else
+#include <linux/mtd/compatmac.h> /* For min/max in older kernels */
+#include "os-linux.h"
+#endif
+
+#ifndef CONFIG_JFFS2_FS_DEBUG
+#define CONFIG_JFFS2_FS_DEBUG 1
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 0
+#define D1(x) x
+#else
+#define D1(x)
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 1
+#define D2(x) x
+#else
+#define D2(x)
+#endif
+
+#define JFFS2_NATIVE_ENDIAN
+
+/* Note that we also handle conversion of the mode bits between JFFS2
+   (i.e. Linux) format and whatever OS we're actually running on. */
+
+#if defined(JFFS2_NATIVE_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){x})
+#define cpu_to_je32(x) ((jint32_t){x})
+#define cpu_to_jemode(x) ((jmode_t){os_to_jffs2_mode(x)})
+
+#define je16_to_cpu(x) ((x).v16)
+#define je32_to_cpu(x) ((x).v32)
+#define jemode_to_cpu(x) (jffs2_to_os_mode((x).m))
+#elif defined(JFFS2_BIG_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){cpu_to_be16(x)})
+#define cpu_to_je32(x) ((jint32_t){cpu_to_be32(x)})
+#define cpu_to_jemode(x) ((jmode_t){cpu_to_be32(os_to_jffs2_mode(x))})
+
+#define je16_to_cpu(x) (be16_to_cpu(x.v16))
+#define je32_to_cpu(x) (be32_to_cpu(x.v32))
+#define jemode_to_cpu(x) (be32_to_cpu(jffs2_to_os_mode((x).m)))
+#elif defined(JFFS2_LITTLE_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){cpu_to_le16(x)})
+#define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)})
+#define cpu_to_jemode(x) ((jmode_t){cpu_to_le32(os_to_jffs2_mode(x))})
+
+#define je16_to_cpu(x) (le16_to_cpu(x.v16))
+#define je32_to_cpu(x) (le32_to_cpu(x.v32))
+#define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m)))
+#else 
+#error Define one of JFFS2_NATIVE_ENDIAN, JFFS2_BIG_ENDIAN or JFFS2_LITTLE_ENDIAN
+#endif
+
+/*
+  This is all we need to keep in-core for each raw node during normal
+  operation. As and when we do read_inode on a particular inode, we can
+  scan the nodes which are listed for it and build up a proper map of 
+  which nodes are currently valid. JFFSv1 always used to keep that whole
+  map in core for each inode.
+*/
+struct jffs2_raw_node_ref
+{
+	struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref
+		for this inode. If this is the last, it points to the inode_cache
+		for this inode instead. The inode_cache will have NULL in the first
+		word so you know when you've got there :) */
+	struct jffs2_raw_node_ref *next_phys;
+	uint32_t flash_offset;
+	uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ref) below */
+};
+
+        /* flash_offset & 3 always has to be zero, because nodes are
+	   always aligned at 4 bytes. So we have a couple of extra bits
+	   to play with, which indicate the node's status; see below: */ 
+#define REF_UNCHECKED	0	/* We haven't yet checked the CRC or built its inode */
+#define REF_OBSOLETE	1	/* Obsolete, can be completely ignored */
+#define REF_PRISTINE	2	/* Completely clean. GC without looking */
+#define REF_NORMAL	3	/* Possibly overlapped. Read the page and write again on GC */
+#define ref_flags(ref)		((ref)->flash_offset & 3)
+#define ref_offset(ref)		((ref)->flash_offset & ~3)
+#define ref_obsolete(ref)	(((ref)->flash_offset & 3) == REF_OBSOLETE)
+#define mark_ref_normal(ref)    do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
+
+/* For each inode in the filesystem, we need to keep a record of
+   nlink, because it would be a PITA to scan the whole directory tree
+   at read_inode() time to calculate it, and to keep sufficient information
+   in the raw_node_ref (basically both parent and child inode number for 
+   dirent nodes) would take more space than this does. We also keep
+   a pointer to the first physical node which is part of this inode.
+*/
+struct jffs2_inode_cache {
+	struct jffs2_full_dirent *scan_dents; /* Used during scan to hold
+		temporary lists of dirents, and later must be set to
+		NULL to mark the end of the raw_node_ref->next_in_ino
+		chain. */
+	struct jffs2_inode_cache *next;
+	struct jffs2_raw_node_ref *nodes;
+	uint32_t ino;
+	int nlink;
+	int state;
+};
+
+/* Inode states for 'state' above. We need the 'GC' state to prevent
+   someone from doing a read_inode() while we're moving a 'REF_PRISTINE'
+   node without going through all the iget() nonsense */
+#define INO_STATE_UNCHECKED	0	/* CRC checks not yet done */
+#define INO_STATE_CHECKING	1	/* CRC checks in progress */
+#define INO_STATE_PRESENT	2	/* In core */
+#define INO_STATE_CHECKEDABSENT	3	/* Checked, cleared again */
+#define INO_STATE_GC		4	/* GCing a 'pristine' node */
+#define INO_STATE_READING	5	/* In read_inode() */
+
+#define INOCACHE_HASHSIZE 128
+
+/*
+  Larger representation of a raw node, kept in-core only when the 
+  struct inode for this particular ino is instantiated.
+*/
+
+struct jffs2_full_dnode
+{
+	struct jffs2_raw_node_ref *raw;
+	uint32_t ofs; /* The offset to which the data of this node belongs */
+	uint32_t size;
+	uint32_t frags; /* Number of fragments which currently refer
+			to this node. When this reaches zero, 
+			the node is obsolete.  */
+};
+
+/* 
+   Even larger representation of a raw node, kept in-core only while
+   we're actually building up the original map of which nodes go where,
+   in read_inode()
+*/
+struct jffs2_tmp_dnode_info
+{
+	struct jffs2_tmp_dnode_info *next;
+	struct jffs2_full_dnode *fn;
+	uint32_t version;
+};       
+
+struct jffs2_full_dirent
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dirent *next;
+	uint32_t version;
+	uint32_t ino; /* == zero for unlink */
+	unsigned int nhash;
+	unsigned char type;
+	unsigned char name[0];
+};
+
+/*
+  Fragments - used to build a map of which raw node to obtain 
+  data from for each part of the ino
+*/
+struct jffs2_node_frag
+{
+	struct rb_node rb;
+	struct jffs2_full_dnode *node; /* NULL for holes */
+	uint32_t size;
+	uint32_t ofs; /* The offset to which this fragment belongs */
+};
+
+struct jffs2_eraseblock
+{
+	struct list_head list;
+	int bad_count;
+	uint32_t offset;		/* of this block in the MTD */
+
+	uint32_t unchecked_size;
+	uint32_t used_size;
+	uint32_t dirty_size;
+	uint32_t wasted_size;
+	uint32_t free_size;	/* Note that sector_size - free_size
+				   is the offset of the first free space in the block */
+	struct jffs2_raw_node_ref *first_node;
+	struct jffs2_raw_node_ref *last_node;
+
+	struct jffs2_raw_node_ref *gc_node;	/* Next node to be garbage collected */
+};
+
+#define ACCT_SANITY_CHECK(c, jeb) do { \
+		struct jffs2_eraseblock *___j = jeb; \
+		if ((___j) && ___j->used_size + ___j->dirty_size + ___j->free_size + ___j->wasted_size + ___j->unchecked_size != c->sector_size) { \
+		printk(KERN_NOTICE "Eeep. Space accounting for block at 0x%08x is screwed\n", ___j->offset); \
+		printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + wasted %08x + unchecked %08x != total %08x\n", \
+		___j->free_size, ___j->dirty_size, ___j->used_size, ___j->wasted_size, ___j->unchecked_size, c->sector_size); \
+		BUG(); \
+	} \
+	if (c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size) { \
+		printk(KERN_NOTICE "Eeep. Space accounting superblock info is screwed\n"); \
+		printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + erasing %08x + bad %08x + wasted %08x + unchecked %08x != total %08x\n", \
+		c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); \
+		BUG(); \
+	} \
+} while(0)
+
+static inline void paranoia_failed_dump(struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_raw_node_ref *ref;
+	int i=0;
+
+	printk(KERN_NOTICE);
+	for (ref = jeb->first_node; ref; ref = ref->next_phys) {
+		printk("%08x->", ref_offset(ref));
+		if (++i == 8) {
+			i = 0;
+			printk("\n" KERN_NOTICE);
+		}
+	}
+	printk("\n");
+}
+
+
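+/* Walk the block's physical node list, recomputing used_size and
+   unchecked_size from the individual refs, and BUG() if they disagree
+   with the stored accounting or if the list itself is inconsistent. */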
+#define ACCT_PARANOIA_CHECK(jeb) do { \
+		uint32_t my_used_size = 0; \
+		uint32_t my_unchecked_size = 0; \
+		struct jffs2_raw_node_ref *ref2 = jeb->first_node; \
+		while (ref2) { \
+			if (unlikely(ref2->flash_offset < jeb->offset || \
+				     ref2->flash_offset > jeb->offset + c->sector_size)) { \
+				printk(KERN_NOTICE "Node %08x shouldn't be in block at %08x!\n", \
+				       ref_offset(ref2), jeb->offset); \
+				paranoia_failed_dump(jeb); \
+				BUG(); \
+			} \
+			if (ref_flags(ref2) == REF_UNCHECKED) \
+				my_unchecked_size += ref_totlen(c, jeb, ref2); \
+			else if (!ref_obsolete(ref2)) \
+				my_used_size += ref_totlen(c, jeb, ref2); \
+			if (unlikely((!ref2->next_phys) != (ref2 == jeb->last_node))) { \
+                                if (!ref2->next_phys) \
+				       printk("ref for node at %p (phys %08x) has next_phys->%p (----), last_node->%p (phys %08x)\n", \
+				             ref2, ref_offset(ref2), ref2->next_phys, \
+				             jeb->last_node, ref_offset(jeb->last_node)); \
+                                else \
+                                       printk("ref for node at %p (phys %08x) has next_phys->%p (%08x), last_node->%p (phys %08x)\n", \
+				             ref2, ref_offset(ref2), ref2->next_phys, ref_offset(ref2->next_phys), \
+				             jeb->last_node, ref_offset(jeb->last_node)); \
+				paranoia_failed_dump(jeb); \
+				BUG(); \
+			} \
+			ref2 = ref2->next_phys; \
+		} \
+		if (my_used_size != jeb->used_size) { \
+			printk(KERN_NOTICE "Calculated used size %08x != stored used size %08x\n", my_used_size, jeb->used_size); \
+			BUG(); \
+		} \
+		if (my_unchecked_size != jeb->unchecked_size) { \
+			printk(KERN_NOTICE "Calculated unchecked size %08x != stored unchecked size %08x\n", my_unchecked_size, jeb->unchecked_size); \
+			BUG(); \
+		} \
+	} while(0)
+
+/* Calculate totlen from surrounding nodes or eraseblock */
+static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
+				    struct jffs2_eraseblock *jeb,
+				    struct jffs2_raw_node_ref *ref)
+{
+	uint32_t ref_end;
+	
+	if (ref->next_phys)
+		ref_end = ref_offset(ref->next_phys);
+	else {
+		if (!jeb)
+			jeb = &c->blocks[ref->flash_offset / c->sector_size];
+
+		/* Last node in block. Use free_size */
+		BUG_ON(ref != jeb->last_node);
+		ref_end = jeb->offset + c->sector_size - jeb->free_size;
+	}
+	return ref_end - ref_offset(ref);
+}
+
+static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
+				  struct jffs2_eraseblock *jeb,
+				  struct jffs2_raw_node_ref *ref)
+{
+	uint32_t ret;
+
+	D1(if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
+		printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
+		       jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
+		BUG();
+	})
+
+#if 1
+	ret = ref->__totlen;
+#else
+	/* This doesn't actually work yet */
+	ret = __ref_totlen(c, jeb, ref);
+	if (ret != ref->__totlen) {
+		printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
+		       ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
+		       ret, ref->__totlen);
+		if (!jeb)
+			jeb = &c->blocks[ref->flash_offset / c->sector_size];
+		paranoia_failed_dump(jeb);
+		BUG();
+	}
+#endif
+	return ret;
+}
+
+
+#define ALLOC_NORMAL	0	/* Normal allocation */
+#define ALLOC_DELETION	1	/* Deletion node. Best to allow it */
+#define ALLOC_GC	2	/* Space requested for GC. Give it or die */
+#define ALLOC_NORETRY	3	/* For jffs2_write_dnode: On failure, return -EAGAIN instead of retrying */
+
+/* How much dirty space before it goes on the very_dirty_list */
+#define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2))
+
+/* Check whether the dirty space is more than 255 bytes */
+#define ISDIRTY(size) ((size) >  sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) 
+
+#define PAD(x) (((x)+3)&~3)
+
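+/* The raw_node_ref->next_in_ino chain for an inode is terminated by the
+   inode_cache itself: the last ref points at it, and the inode_cache has
+   NULL in its first word (scan_dents). So walking until next_in_ino is
+   NULL leaves us pointing at the inode_cache. */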
+static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
+{
+	while(raw->next_in_ino) {
+		raw = raw->next_in_ino;
+	}
+
+	return ((struct jffs2_inode_cache *)raw);
+}
+
+static inline struct jffs2_node_frag *frag_first(struct rb_root *root)
+{
+	struct rb_node *node = root->rb_node;
+
+	if (!node)
+		return NULL;
+	while(node->rb_left)
+		node = node->rb_left;
+	return rb_entry(node, struct jffs2_node_frag, rb);
+}
+#define rb_parent(rb) ((rb)->rb_parent)
+#define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
+#define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
+#define frag_erase(frag, list) rb_erase(&frag->rb, list);
+
+/* nodelist.c */
+D2(void jffs2_print_frag_list(struct jffs2_inode_info *f));
+void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list);
+int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			  struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp,
+			  uint32_t *highest_version, uint32_t *latest_mctime,
+			  uint32_t *mctime_ver);
+void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state);
+struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
+void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new);
+void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old);
+void jffs2_free_ino_caches(struct jffs2_sb_info *c);
+void jffs2_free_raw_node_refs(struct jffs2_sb_info *c);
+struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset);
+void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
+void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base);
+struct rb_node *rb_next(struct rb_node *);
+struct rb_node *rb_prev(struct rb_node *);
+void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
+
+/* nodemgmt.c */
+int jffs2_thread_should_wake(struct jffs2_sb_info *c);
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio);
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
+int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new);
+void jffs2_complete_reservation(struct jffs2_sb_info *c);
+void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw);
+void jffs2_dump_block_lists(struct jffs2_sb_info *c);
+
+/* write.c */
+int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri);
+
+struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode);
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode);
+int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			    struct jffs2_raw_inode *ri, unsigned char *buf, 
+			    uint32_t offset, uint32_t writelen, uint32_t *retlen);
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen);
+int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f);
+int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen);
+
+
+/* readinode.c */
+void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
+int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, 
+			uint32_t ino, struct jffs2_raw_inode *latest_node);
+int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
+void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
+
+/* malloc.c */
+int jffs2_create_slab_caches(void);
+void jffs2_destroy_slab_caches(void);
+
+struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize);
+void jffs2_free_full_dirent(struct jffs2_full_dirent *);
+struct jffs2_full_dnode *jffs2_alloc_full_dnode(void);
+void jffs2_free_full_dnode(struct jffs2_full_dnode *);
+struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void);
+void jffs2_free_raw_dirent(struct jffs2_raw_dirent *);
+struct jffs2_raw_inode *jffs2_alloc_raw_inode(void);
+void jffs2_free_raw_inode(struct jffs2_raw_inode *);
+struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void);
+void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *);
+struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void);
+void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *);
+struct jffs2_node_frag *jffs2_alloc_node_frag(void);
+void jffs2_free_node_frag(struct jffs2_node_frag *);
+struct jffs2_inode_cache *jffs2_alloc_inode_cache(void);
+void jffs2_free_inode_cache(struct jffs2_inode_cache *);
+
+/* gc.c */
+int jffs2_garbage_collect_pass(struct jffs2_sb_info *c);
+
+/* read.c */
+int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+		     struct jffs2_full_dnode *fd, unsigned char *buf,
+		     int ofs, int len);
+int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			   unsigned char *buf, uint32_t offset, uint32_t len);
+char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
+
+/* scan.c */
+int jffs2_scan_medium(struct jffs2_sb_info *c);
+void jffs2_rotate_lists(struct jffs2_sb_info *c);
+
+/* build.c */
+int jffs2_do_mount_fs(struct jffs2_sb_info *c);
+
+/* erase.c */
+void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
+
+#ifdef CONFIG_JFFS2_FS_NAND
+/* wbuf.c */
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino);
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
+int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+#endif
+
+#endif /* __JFFS2_NODELIST_H__ */
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
new file mode 100644
index 000000000000..2651135bdf42
--- /dev/null
+++ b/fs/jffs2/nodemgmt.c
@@ -0,0 +1,838 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: nodemgmt.c,v 1.115 2004/11/22 11:07:21 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
+#include <linux/sched.h> /* For cond_resched() */
+#include "nodelist.h"
+
+/**
+ *	jffs2_reserve_space - request physical space to write nodes to flash
+ *	@c: superblock info
+ *	@minsize: Minimum acceptable size of allocation
+ *	@ofs: Returned value of node offset
+ *	@len: Returned value of allocation length
+ *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
+ *
+ *	Requests a block of physical space on the flash. Returns zero for success
+ *	and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
+ *	or other error if appropriate.
+ *
+ *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
+ *	allocation semaphore, to prevent more than one allocation from being
+ *	active at any time. The semaphore is later released by jffs2_complete_reservation()
+ *
+ *	jffs2_reserve_space() may trigger garbage collection in order to make room
+ *	for the requested allocation.
+ */
+
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len);
+
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
+{
+	int ret = -EAGAIN;
+	int blocksneeded = c->resv_blocks_write;
+	/* align it */
+	minsize = PAD(minsize);
+
+	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
+	down(&c->alloc_sem);
+
+	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
+
+	spin_lock(&c->erase_completion_lock);
+
+	/* this needs a little more thought (true <tglx> :)) */
+	while(ret == -EAGAIN) {
+		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
+			int ret;
+			uint32_t dirty, avail;
+
+			/* Calculate the real dirty size.
+			 * dirty_size contains blocks on the erase_pending_list;
+			 * those blocks are counted in c->nr_erasing_blocks.
+			 * Once a block is actually erased, it is no longer counted as dirty_space,
+			 * but it is still counted in c->nr_erasing_blocks, so we add it and then subtract it
+			 * again via c->nr_erasing_blocks * c->sector_size.
+			 * Blocks on the erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
+			 * This helps us to force gc and eventually pick a clean block to spread the load.
+			 * We add unchecked_size here, as we hopefully will find some space to use there.
+			 * This will affect the sum only once, as gc finishes checking
+			 * the nodes first.
+			 */
+			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
+			if (dirty < c->nospc_dirty_size) {
+				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
+					printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
+					break;
+				}
+				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
+					  dirty, c->unchecked_size, c->sector_size));
+
+				spin_unlock(&c->erase_completion_lock);
+				up(&c->alloc_sem);
+				return -ENOSPC;
+			}
+			
+			/* Calculate the possibly available space. "Possibly available" means that we
+			 * don't know whether the unchecked size contains obsoleted nodes, which could give us some
+			 * more usable space. This will affect the sum only once, as gc finishes checking
+			 * the nodes first.
+			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
+			 * blocksneeded * sector_size.
+			 * This prevents endless gc looping on a filesystem which is nearly full, even if
+			 * the check above passes.
+			 */
+			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
+			if ( (avail / c->sector_size) <= blocksneeded) {
+				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
+					printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
+					break;
+				}
+
+				D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
+					  avail, blocksneeded * c->sector_size));
+				spin_unlock(&c->erase_completion_lock);
+				up(&c->alloc_sem);
+				return -ENOSPC;
+			}
+
+			up(&c->alloc_sem);
+
+			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
+				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
+				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
+			spin_unlock(&c->erase_completion_lock);
+			
+			ret = jffs2_garbage_collect_pass(c);
+			if (ret)
+				return ret;
+
+			cond_resched();
+
+			if (signal_pending(current))
+				return -EINTR;
+
+			down(&c->alloc_sem);
+			spin_lock(&c->erase_completion_lock);
+		}
+
+		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
+		if (ret) {
+			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
+		}
+	}
+	spin_unlock(&c->erase_completion_lock);
+	if (ret)
+		up(&c->alloc_sem);
+	return ret;
+}
+
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
+{
+	int ret = -EAGAIN;
+	minsize = PAD(minsize);
+
+	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
+
+	spin_lock(&c->erase_completion_lock);
+	while(ret == -EAGAIN) {
+		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
+		if (ret) {
+		        D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
+		}
+	}
+	spin_unlock(&c->erase_completion_lock);
+	return ret;
+}
+
+/* Called with alloc sem _and_ erase_completion_lock */
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len)
+{
+	struct jffs2_eraseblock *jeb = c->nextblock;
+	
+ restart:
+	if (jeb && minsize > jeb->free_size) {
+		/* Skip the end of this block and file it as having some dirty space */
+		/* If there's a pending write to it, flush now */
+		if (jffs2_wbuf_dirty(c)) {
+			spin_unlock(&c->erase_completion_lock);
+			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));			    
+			jffs2_flush_wbuf_pad(c);
+			spin_lock(&c->erase_completion_lock);
+			jeb = c->nextblock;
+			goto restart;
+		}
+		c->wasted_size += jeb->free_size;
+		c->free_size -= jeb->free_size;
+		jeb->wasted_size += jeb->free_size;
+		jeb->free_size = 0;
+		
+		/* Check if we have a dirty block now, or if it was dirty already */
+		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
+			c->dirty_size += jeb->wasted_size;
+			c->wasted_size -= jeb->wasted_size;
+			jeb->dirty_size += jeb->wasted_size;
+			jeb->wasted_size = 0;
+			if (VERYDIRTY(c, jeb->dirty_size)) {
+				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+				list_add_tail(&jeb->list, &c->very_dirty_list);
+			} else {
+				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+				list_add_tail(&jeb->list, &c->dirty_list);
+			}
+		} else { 
+			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+			list_add_tail(&jeb->list, &c->clean_list);
+		}
+		c->nextblock = jeb = NULL;
+	}
+	
+	if (!jeb) {
+		struct list_head *next;
+		/* Take the next block off the 'free' list */
+
+		if (list_empty(&c->free_list)) {
+
+			if (!c->nr_erasing_blocks && 
+			    !list_empty(&c->erasable_list)) {
+				struct jffs2_eraseblock *ejeb;
+
+				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
+				list_del(&ejeb->list);
+				list_add_tail(&ejeb->list, &c->erase_pending_list);
+				c->nr_erasing_blocks++;
+				jffs2_erase_pending_trigger(c);
+				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
+					  ejeb->offset));
+			}
+
+			if (!c->nr_erasing_blocks && 
+			    !list_empty(&c->erasable_pending_wbuf_list)) {
+				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
+				/* c->nextblock is NULL, no update to c->nextblock allowed */			    
+				spin_unlock(&c->erase_completion_lock);
+				jffs2_flush_wbuf_pad(c);
+				spin_lock(&c->erase_completion_lock);
+				/* Have another go. It'll be on the erasable_list now */
+				return -EAGAIN;
+			}
+
+			if (!c->nr_erasing_blocks) {
+				/* Ouch. We're in GC, or we wouldn't have got here.
+				   And there's no space left. At all. */
+				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", 
+				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", 
+				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
+				return -ENOSPC;
+			}
+
+			spin_unlock(&c->erase_completion_lock);
+			/* Don't wait for it; just erase one right now */
+			jffs2_erase_pending_blocks(c, 1);
+			spin_lock(&c->erase_completion_lock);
+
+			/* An erase may have failed, decreasing the
+			   amount of free space available. So we must
+			   restart from the beginning */
+			return -EAGAIN;
+		}
+
+		next = c->free_list.next;
+		list_del(next);
+		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
+		c->nr_free_blocks--;
+
+		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
+			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
+			goto restart;
+		}
+	}
+	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
+	   enough space */
+	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
+	*len = jeb->free_size;
+
+	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
+	    !jeb->first_node->next_in_ino) {
+		/* Only node in it beforehand was a CLEANMARKER node (we think). 
+		   So mark it obsolete now that there's going to be another node
+		   in the block. This will reduce used_size to zero, but we've
+		   already set c->nextblock so that jffs2_mark_node_obsolete()
+		   won't try to refile it to the dirty_list.
+		*/
+		spin_unlock(&c->erase_completion_lock);
+		jffs2_mark_node_obsolete(c, jeb->first_node);
+		spin_lock(&c->erase_completion_lock);
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
+	return 0;
+}
+
+/**
+ *	jffs2_add_physical_node_ref - add a physical node reference to the list
+ *	@c: superblock info
+ *	@new: new node reference to add
+ *
+ *	Should only be used to report nodes for which space has been allocated 
+ *	by jffs2_reserve_space.
+ *
+ *	Must be called with the alloc_sem held.
+ */
+ 
+int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
+{
+	struct jffs2_eraseblock *jeb;
+	uint32_t len;
+
+	jeb = &c->blocks[new->flash_offset / c->sector_size];
+	len = ref_totlen(c, jeb, new);
+
+	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
+#if 1
+	if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
+		printk(KERN_WARNING "argh. node added in wrong place\n");
+		jffs2_free_raw_node_ref(new);
+		return -EINVAL;
+	}
+#endif
+	spin_lock(&c->erase_completion_lock);
+
+	if (!jeb->first_node)
+		jeb->first_node = new;
+	if (jeb->last_node)
+		jeb->last_node->next_phys = new;
+	jeb->last_node = new;
+
+	jeb->free_size -= len;
+	c->free_size -= len;
+	if (ref_obsolete(new)) {
+		jeb->dirty_size += len;
+		c->dirty_size += len;
+	} else {
+		jeb->used_size += len;
+		c->used_size += len;
+	}
+
+	if (!jeb->free_size && !jeb->dirty_size) {
+		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
+		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+		if (jffs2_wbuf_dirty(c)) {
+			/* Flush the last write in the block if it's outstanding */
+			spin_unlock(&c->erase_completion_lock);
+			jffs2_flush_wbuf_pad(c);
+			spin_lock(&c->erase_completion_lock);
+		}
+
+		list_add_tail(&jeb->list, &c->clean_list);
+		c->nextblock = NULL;
+	}
+	ACCT_SANITY_CHECK(c,jeb);
+	D1(ACCT_PARANOIA_CHECK(jeb));
+
+	spin_unlock(&c->erase_completion_lock);
+
+	return 0;
+}
+
+
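+/* Release the allocation semaphore taken by jffs2_reserve_space(), and give
+   the garbage collection thread a chance to run now that the write is done. */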
+void jffs2_complete_reservation(struct jffs2_sb_info *c)
+{
+	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
+	jffs2_garbage_collect_trigger(c);
+	up(&c->alloc_sem);
+}
+
+static inline int on_list(struct list_head *obj, struct list_head *head)
+{
+	struct list_head *this;
+
+	list_for_each(this, head) {
+		if (this == obj) {
+			D1(printk("%p is on list at %p\n", obj, head));
+			return 1;
+
+		}
+	}
+	return 0;
+}
+
+void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
+{
+	struct jffs2_eraseblock *jeb;
+	int blocknr;
+	struct jffs2_unknown_node n;
+	int ret, addedsize;
+	size_t retlen;
+
+	if(!ref) {
+		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
+		return;
+	}
+	if (ref_obsolete(ref)) {
+		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
+		return;
+	}
+	blocknr = ref->flash_offset / c->sector_size;
+	if (blocknr >= c->nr_blocks) {
+		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
+		BUG();
+	}
+	jeb = &c->blocks[blocknr];
+
+	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
+	    !(c->flags & JFFS2_SB_FLAG_MOUNTING)) {
+		/* Hm. This may confuse static lock analysis. If any of the above 
+		   three conditions is false, we're going to return from this 
+		   function without actually obliterating any nodes or freeing
+		   any jffs2_raw_node_refs. So we don't need to stop erases from
+		   happening, or protect against people holding an obsolete
+		   jffs2_raw_node_ref without the erase_completion_lock. */
+		down(&c->erase_free_sem);
+	}
+
+	spin_lock(&c->erase_completion_lock);
+
+	if (ref_flags(ref) == REF_UNCHECKED) {
+		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
+			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
+			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
+			BUG();
+		})
+		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
+		c->unchecked_size -= ref_totlen(c, jeb, ref);
+	} else {
+		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
+			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
+			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+			BUG();
+		})
+		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+		jeb->used_size -= ref_totlen(c, jeb, ref);
+		c->used_size -= ref_totlen(c, jeb, ref);
+	}
+
+	// Take care that wasted size is taken into account
+	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
+		D1(printk("Dirtying\n"));
+		addedsize = ref_totlen(c, jeb, ref);
+		jeb->dirty_size += ref_totlen(c, jeb, ref);
+		c->dirty_size += ref_totlen(c, jeb, ref);
+
+		/* Convert wasted space to dirty, if not a bad block */
+		if (jeb->wasted_size) {
+			if (on_list(&jeb->list, &c->bad_used_list)) {
+				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
+					  jeb->offset));
+				addedsize = 0; /* To fool the refiling code later */
+			} else {
+				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
+					  jeb->wasted_size, jeb->offset));
+				addedsize += jeb->wasted_size;
+				jeb->dirty_size += jeb->wasted_size;
+				c->dirty_size += jeb->wasted_size;
+				c->wasted_size -= jeb->wasted_size;
+				jeb->wasted_size = 0;
+			}
+		}
+	} else {
+		D1(printk("Wasting\n"));
+		addedsize = 0;
+		jeb->wasted_size += ref_totlen(c, jeb, ref);
+		c->wasted_size += ref_totlen(c, jeb, ref);	
+	}
+	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
+	
+	ACCT_SANITY_CHECK(c, jeb);
+
+	D1(ACCT_PARANOIA_CHECK(jeb));
+
+	if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
+		/* Mount in progress. Don't muck about with the block
+		   lists because they're not ready yet, and don't actually
+		   obliterate nodes that look obsolete. If they weren't 
+		   marked obsolete on the flash at the time they _became_
+		   obsolete, there was probably a reason for that. */
+		spin_unlock(&c->erase_completion_lock);
+		/* We didn't lock the erase_free_sem */
+		return;
+	}
+
+	if (jeb == c->nextblock) {
+		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
+	} else if (!jeb->used_size && !jeb->unchecked_size) {
+		if (jeb == c->gcblock) {
+			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
+			c->gcblock = NULL;
+		} else {
+			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
+			list_del(&jeb->list);
+		}
+		if (jffs2_wbuf_dirty(c)) {
+			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
+			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
+		} else {
+			if (jiffies & 127) {
+				/* Most of the time, we just erase it immediately. Otherwise we
+				   spend ages scanning it on mount, etc. */
+				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+				list_add_tail(&jeb->list, &c->erase_pending_list);
+				c->nr_erasing_blocks++;
+				jffs2_erase_pending_trigger(c);
+			} else {
+				/* Sometimes, however, we leave it elsewhere so it doesn't get
+				   immediately reused, and we spread the load a bit. */
+				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+				list_add_tail(&jeb->list, &c->erasable_list);
+			}				
+		}
+		D1(printk(KERN_DEBUG "Done OK\n"));
+	} else if (jeb == c->gcblock) {
+		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
+	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
+		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
+		list_del(&jeb->list);
+		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
+		list_add_tail(&jeb->list, &c->dirty_list);
+	} else if (VERYDIRTY(c, jeb->dirty_size) &&
+		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
+		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
+		list_del(&jeb->list);
+		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
+		list_add_tail(&jeb->list, &c->very_dirty_list);
+	} else {
+		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
+			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 
+	}			  	
+
+	spin_unlock(&c->erase_completion_lock);
+
+	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c)) {
+		/* We didn't lock the erase_free_sem */
+		return;
+	}
+
+	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
+	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
+	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
+	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */
+
+	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
+	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
+	if (ret) {
+		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+		goto out_erase_sem;
+	}
+	if (retlen != sizeof(n)) {
+		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+		goto out_erase_sem;
+	}
+	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
+		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
+		goto out_erase_sem;
+	}
+	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
+		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
+		goto out_erase_sem;
+	}
+	/* XXX FIXME: This is ugly now */
+	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
+	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
+	if (ret) {
+		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+		goto out_erase_sem;
+	}
+	if (retlen != sizeof(n)) {
+		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+		goto out_erase_sem;
+	}
+
+	/* Nodes which have been marked obsolete no longer need to be
+	   associated with any inode. Remove them from the per-inode list.
+	   
+	   Note we can't do this for NAND at the moment because we need 
+	   obsolete dirent nodes to stay on the lists, because of the
+	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
+	   because we delete the inocache, and on NAND we need that to 
+	   stay around until all the nodes are actually erased, in order
+	   to stop us from giving the same inode number to another newly
+	   created inode. */
+	if (ref->next_in_ino) {
+		struct jffs2_inode_cache *ic;
+		struct jffs2_raw_node_ref **p;
+
+		spin_lock(&c->erase_completion_lock);
+
+		ic = jffs2_raw_ref_to_ic(ref);
+		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
+			;
+
+		*p = ref->next_in_ino;
+		ref->next_in_ino = NULL;
+
+		if (ic->nodes == (void *)ic) {
+			D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
+			jffs2_del_ino_cache(c, ic);
+			jffs2_free_inode_cache(ic);
+		}
+
+		spin_unlock(&c->erase_completion_lock);
+	}
+
+
+	/* Merge with the next node in the physical list, if there is one
+	   and if it's also obsolete and if it doesn't belong to any inode */
+	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
+	    !ref->next_phys->next_in_ino) {
+		struct jffs2_raw_node_ref *n = ref->next_phys;
+		
+		spin_lock(&c->erase_completion_lock);
+
+		ref->__totlen += n->__totlen;
+		ref->next_phys = n->next_phys;
+                if (jeb->last_node == n) jeb->last_node = ref;
+		if (jeb->gc_node == n) {
+			/* gc will be happy continuing gc on this node */
+			jeb->gc_node=ref;
+		}
+		spin_unlock(&c->erase_completion_lock);
+
+		jffs2_free_raw_node_ref(n);
+	}
+	
+	/* Also merge with the previous node in the list, if there is one
+	   and that one is obsolete */
+	if (ref != jeb->first_node ) {
+		struct jffs2_raw_node_ref *p = jeb->first_node;
+
+		spin_lock(&c->erase_completion_lock);
+
+		while (p->next_phys != ref)
+			p = p->next_phys;
+		
+		if (ref_obsolete(p) && !ref->next_in_ino) {
+			p->__totlen += ref->__totlen;
+			if (jeb->last_node == ref) {
+				jeb->last_node = p;
+			}
+			if (jeb->gc_node == ref) {
+				/* gc will be happy continuing gc on this node */
+				jeb->gc_node=p;
+			}
+			p->next_phys = ref->next_phys;
+			jffs2_free_raw_node_ref(ref);
+		}
+		spin_unlock(&c->erase_completion_lock);
+	}
+ out_erase_sem:
+	up(&c->erase_free_sem);
+}
+
+#if CONFIG_JFFS2_FS_DEBUG >= 2
+void jffs2_dump_block_lists(struct jffs2_sb_info *c)
+{
+
+
+	printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
+	printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
+	printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
+	printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
+	printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
+	printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
+	printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
+	printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
+	printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
+	printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
+	printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
+
+	if (c->nextblock) {
+		printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+		       c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
+	} else {
+		printk(KERN_DEBUG "nextblock: NULL\n");
+	}
+	if (c->gcblock) {
+		printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+		       c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
+	} else {
+		printk(KERN_DEBUG "gcblock: NULL\n");
+	}
+	if (list_empty(&c->clean_list)) {
+		printk(KERN_DEBUG "clean_list: empty\n");
+	} else {
+		struct list_head *this;
+		int	numblocks = 0;
+		uint32_t dirty = 0;
+
+		list_for_each(this, &c->clean_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			numblocks ++;
+			dirty += jeb->wasted_size;
+			printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+		printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
+	}
+	if (list_empty(&c->very_dirty_list)) {
+		printk(KERN_DEBUG "very_dirty_list: empty\n");
+	} else {
+		struct list_head *this;
+		int	numblocks = 0;
+		uint32_t dirty = 0;
+
+		list_for_each(this, &c->very_dirty_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			numblocks ++;
+			dirty += jeb->dirty_size;
+			printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+		printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
+			numblocks, dirty, dirty / numblocks);
+	}
+	if (list_empty(&c->dirty_list)) {
+		printk(KERN_DEBUG "dirty_list: empty\n");
+	} else {
+		struct list_head *this;
+		int	numblocks = 0;
+		uint32_t dirty = 0;
+
+		list_for_each(this, &c->dirty_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			numblocks ++;
+			dirty += jeb->dirty_size;
+			printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+		printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
+			numblocks, dirty, dirty / numblocks);
+	}
+	if (list_empty(&c->erasable_list)) {
+		printk(KERN_DEBUG "erasable_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->erasable_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->erasing_list)) {
+		printk(KERN_DEBUG "erasing_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->erasing_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->erase_pending_list)) {
+		printk(KERN_DEBUG "erase_pending_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->erase_pending_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->erasable_pending_wbuf_list)) {
+		printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->erasable_pending_wbuf_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->free_list)) {
+		printk(KERN_DEBUG "free_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->free_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->bad_list)) {
+		printk(KERN_DEBUG "bad_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->bad_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+	if (list_empty(&c->bad_used_list)) {
+		printk(KERN_DEBUG "bad_used_list: empty\n");
+	} else {
+		struct list_head *this;
+
+		list_for_each(this, &c->bad_used_list) {
+			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+			printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
+			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
+		}
+	}
+}
+#endif /* CONFIG_JFFS2_FS_DEBUG */
+
+int jffs2_thread_should_wake(struct jffs2_sb_info *c)
+{
+	int ret = 0;
+	uint32_t dirty;
+
+	if (c->unchecked_size) {
+		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
+			  c->unchecked_size, c->checked_ino));
+		return 1;
+	}
+
+	/* dirty_size contains blocks on the erase_pending_list;
+	 * those blocks are also counted in c->nr_erasing_blocks.
+	 * Once a block is actually being erased it is no longer counted as
+	 * dirty_size but is still counted in c->nr_erasing_blocks, so we add
+	 * c->erasing_size and then subtract c->nr_erasing_blocks * c->sector_size.
+	 * Blocks on the erasable_list are counted as dirty_size, but not in
+	 * c->nr_erasing_blocks.
+	 * This helps us force GC and eventually pick a clean block, to spread the load.
+	 */
+	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
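+	/* Illustrative only (hypothetical numbers): with sector_size 0x10000,
+	 * two blocks on the erase_pending_list, one block mid-erase and another
+	 * 0x10000 of dirt on the dirty_list, we have dirty_size 0x30000,
+	 * erasing_size 0x10000 and nr_erasing_blocks 3, so
+	 * dirty = 0x30000 + 0x10000 - 3 * 0x10000 = 0x10000, i.e. only the
+	 * dirt which is not already queued for erase. */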
+
+	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && 
+			(dirty > c->nospc_dirty_size)) 
+		ret = 1;
+
+	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", 
+		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
+
+	return ret;
+}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
new file mode 100644
index 000000000000..03b0acc37b73
--- /dev/null
+++ b/fs/jffs2/os-linux.h
@@ -0,0 +1,217 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2002-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: os-linux.h,v 1.51 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#ifndef __JFFS2_OS_LINUX_H__
+#define __JFFS2_OS_LINUX_H__
+#include <linux/version.h>
+
+/* JFFS2 uses Linux mode bits natively -- no need for conversion */
+#define os_to_jffs2_mode(x) (x)
+#define jffs2_to_os_mode(x) (x)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,73)
+#define kstatfs statfs
+#endif
+
+struct kstatfs;
+struct kvec;
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
+#define JFFS2_INODE_INFO(i) (list_entry(i, struct jffs2_inode_info, vfs_inode))
+#define OFNI_EDONI_2SFFJ(f)  (&(f)->vfs_inode)
+#define JFFS2_SB_INFO(sb) (sb->s_fs_info)
+#define OFNI_BS_2SFFJ(c)  ((struct super_block *)c->os_priv)
+#elif defined(JFFS2_OUT_OF_KERNEL)
+#define JFFS2_INODE_INFO(i) ((struct jffs2_inode_info *) &(i)->u)
+#define OFNI_EDONI_2SFFJ(f)  ((struct inode *) ( ((char *)f) - ((char *)(&((struct inode *)NULL)->u)) ) )
+#define JFFS2_SB_INFO(sb) ((struct jffs2_sb_info *) &(sb)->u)
+#define OFNI_BS_2SFFJ(c)  ((struct super_block *) ( ((char *)c) - ((char *)(&((struct super_block *)NULL)->u)) ) )
+#else
+#define JFFS2_INODE_INFO(i) (&i->u.jffs2_i)
+#define OFNI_EDONI_2SFFJ(f)  ((struct inode *) ( ((char *)f) - ((char *)(&((struct inode *)NULL)->u)) ) )
+#define JFFS2_SB_INFO(sb) (&sb->u.jffs2_sb)
+#define OFNI_BS_2SFFJ(c)  ((struct super_block *) ( ((char *)c) - ((char *)(&((struct super_block *)NULL)->u)) ) )
+#endif
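+/* The OFNI_* macros are simply the JFFS2_* names reversed; they map in the
+ * opposite direction, from a jffs2_inode_info back to its VFS inode and from
+ * a jffs2_sb_info back to its super_block, for whichever layout was selected
+ * above. */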
+
+
+#define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size)
+#define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode)
+#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid)
+#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid)
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,1)
+#define JFFS2_F_I_RDEV_MIN(f) (iminor(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_RDEV_MAJ(f) (imajor(OFNI_EDONI_2SFFJ(f)))
+#else
+#define JFFS2_F_I_RDEV_MIN(f) (MINOR(to_kdev_t(OFNI_EDONI_2SFFJ(f)->i_rdev)))
+#define JFFS2_F_I_RDEV_MAJ(f) (MAJOR(to_kdev_t(OFNI_EDONI_2SFFJ(f)->i_rdev)))
+#endif
+
+/* Urgh. The things we do to keep the 2.4 build working */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,47)
+#define ITIME(sec) ((struct timespec){sec, 0})
+#define I_SEC(tv) ((tv).tv_sec)
+#define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime.tv_sec)
+#define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime.tv_sec)
+#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime.tv_sec)
+#else
+#define ITIME(x) (x)
+#define I_SEC(x) (x)
+#define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime)
+#define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime)
+#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime)
+#endif
+
+#define sleep_on_spinunlock(wq, s)				\
+	do {							\
+		DECLARE_WAITQUEUE(__wait, current);		\
+		add_wait_queue((wq), &__wait);			\
+		set_current_state(TASK_UNINTERRUPTIBLE);	\
+		spin_unlock(s);					\
+		schedule();					\
+		remove_wait_queue((wq), &__wait);		\
+	} while(0)
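+/* Note: sleep_on_spinunlock() drops the spinlock 's' before sleeping and does
+ * NOT re-take it on return; callers which still need the lock (e.g. the
+ * inocache retry loop in readinode.c) must re-acquire it themselves. */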
+
+static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
+{
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
+	f->highest_version = 0;
+	f->fragtree = RB_ROOT;
+	f->metadata = NULL;
+	f->dents = NULL;
+	f->flags = 0;
+	f->usercompr = 0;
+#else
+	memset(f, 0, sizeof(*f));
+	init_MUTEX_LOCKED(&f->sem);
+#endif
+}
+
+#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+
+#if (!defined CONFIG_JFFS2_FS_NAND && !defined CONFIG_JFFS2_FS_NOR_ECC)
+#define jffs2_can_mark_obsolete(c) (1)
+#define jffs2_cleanmarker_oob(c) (0)
+#define jffs2_write_nand_cleanmarker(c,jeb) (-EIO)
+
+#define jffs2_flash_write(c, ofs, len, retlen, buf) ((c)->mtd->write((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flush_wbuf_pad(c) ({ (void)(c), 0; })
+#define jffs2_flush_wbuf_gc(c, i) ({ (void)(c), (void) i, 0; })
+#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
+#define jffs2_nand_flash_setup(c) (0)
+#define jffs2_nand_flash_cleanup(c) do {} while(0)
+#define jffs2_wbuf_dirty(c) (0)
+#define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e)
+#define jffs2_wbuf_timeout NULL
+#define jffs2_wbuf_process NULL
+#define jffs2_nor_ecc(c) (0)
+#define jffs2_nor_ecc_flash_setup(c) (0)
+#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0)
+
+#else /* NAND and/or ECC'd NOR support present */
+
+#define jffs2_can_mark_obsolete(c) ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & MTD_ECC)) || c->mtd->type == MTD_RAM)
+#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
+
+#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
+
+/* wbuf.c */
+int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino);
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf);
+int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf);
+int jffs2_check_oob_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,int mode);
+int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
+void jffs2_wbuf_timeout(unsigned long data);
+void jffs2_wbuf_process(void *data);
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino);
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
+int jffs2_nand_flash_setup(struct jffs2_sb_info *c);
+void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c);
+#ifdef CONFIG_JFFS2_FS_NOR_ECC
+#define jffs2_nor_ecc(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_ECC))
+int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c);
+void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c);
+#else
+#define jffs2_nor_ecc(c) (0)
+#define jffs2_nor_ecc_flash_setup(c) (0)
+#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0)
+#endif /* NOR ECC */
+#endif /* NAND */
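+/* Summary of the conditional above: without NAND or ECC'd NOR support the
+ * write-buffer hooks collapse to no-ops or direct mtd->read/write calls;
+ * with it, the real implementations live in wbuf.c and are declared here. */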
+
+/* erase.c */
+static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c)
+{
+	OFNI_BS_2SFFJ(c)->s_dirt = 1;
+}
+
+/* background.c */
+int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
+void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
+void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c);
+
+/* dir.c */
+extern struct file_operations jffs2_dir_operations;
+extern struct inode_operations jffs2_dir_inode_operations;
+
+/* file.c */
+extern struct file_operations jffs2_file_operations;
+extern struct inode_operations jffs2_file_inode_operations;
+extern struct address_space_operations jffs2_file_address_operations;
+int jffs2_fsync(struct file *, struct dentry *, int);
+int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
+
+/* ioctl.c */
+int jffs2_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+/* symlink.c */
+extern struct inode_operations jffs2_symlink_inode_operations;
+
+/* fs.c */
+int jffs2_setattr (struct dentry *, struct iattr *);
+void jffs2_read_inode (struct inode *);
+void jffs2_clear_inode (struct inode *);
+void jffs2_dirty_inode(struct inode *inode);
+struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
+			       struct jffs2_raw_inode *ri);
+int jffs2_statfs (struct super_block *, struct kstatfs *);
+void jffs2_write_super (struct super_block *);
+int jffs2_remount_fs (struct super_block *, int *, char *);
+int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
+void jffs2_gc_release_inode(struct jffs2_sb_info *c,
+			    struct jffs2_inode_info *f);
+struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
+					      int inum, int nlink);
+
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, 
+				   struct jffs2_inode_info *f, 
+				   unsigned long offset,
+				   unsigned long *priv);
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
+			   unsigned char *pg,
+			   unsigned long *priv);
+void jffs2_flash_cleanup(struct jffs2_sb_info *c);
+     
+
+/* writev.c */
+int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, 
+		       unsigned long count, loff_t to, size_t *retlen);
+
+
+#endif /* __JFFS2_OS_LINUX_H__ */
+
+
diff --git a/fs/jffs2/pushpull.h b/fs/jffs2/pushpull.h
new file mode 100644
index 000000000000..c0c2a9158dff
--- /dev/null
+++ b/fs/jffs2/pushpull.h
@@ -0,0 +1,72 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: pushpull.h,v 1.10 2004/11/16 20:36:11 dwmw2 Exp $
+ *
+ */
+
+#ifndef __PUSHPULL_H__
+#define __PUSHPULL_H__
+
+#include <linux/errno.h>
+
+struct pushpull {
+	unsigned char *buf;
+	unsigned int buflen;
+	unsigned int ofs;
+	unsigned int reserve;
+};
+
+
+static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve)
+{
+	pp->buf = buf;
+	pp->buflen = buflen;
+	pp->ofs = ofs;
+	pp->reserve = reserve;
+}
+
+static inline int pushbit(struct pushpull *pp, int bit, int use_reserved)
+{
+	if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) {
+		return -ENOSPC;
+	}
+
+	if (bit) {
+		pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs &7)));
+	}
+	else {
+		pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs &7)));
+	}
+	pp->ofs++;
+
+	return 0;
+}
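+/* Illustrative only: bits are packed MSB-first, so starting from a zeroed
+ * buffer with reserve 0, pushing the bits 1, 0, 1 sets the top three bits of
+ * buf[0], leaving it equal to 0xA0. */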
+
+static inline int pushedbits(struct pushpull *pp)
+{
+	return pp->ofs;
+}
+
+static inline int pullbit(struct pushpull *pp)
+{
+	int bit;
+
+	bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1;
+
+	pp->ofs++;
+	return bit;
+}
+
+static inline int pulledbits(struct pushpull *pp)
+{
+	return pp->ofs;
+}
+
+#endif /* __PUSHPULL_H__ */
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
new file mode 100644
index 000000000000..eb493dc06db7
--- /dev/null
+++ b/fs/jffs2/read.c
@@ -0,0 +1,246 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: read.c,v 1.38 2004/11/16 20:36:12 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
+#include "nodelist.h"
+#include "compr.h"
+
+int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+		     struct jffs2_full_dnode *fd, unsigned char *buf,
+		     int ofs, int len)
+{
+	struct jffs2_raw_inode *ri;
+	size_t readlen;
+	uint32_t crc;
+	unsigned char *decomprbuf = NULL;
+	unsigned char *readbuf = NULL;
+	int ret = 0;
+
+	ri = jffs2_alloc_raw_inode();
+	if (!ri)
+		return -ENOMEM;
+
+	ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri);
+	if (ret) {
+		jffs2_free_raw_inode(ri);
+		printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret);
+		return ret;
+	}
+	if (readlen != sizeof(*ri)) {
+		jffs2_free_raw_inode(ri);
+		printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", 
+		       ref_offset(fd->raw), sizeof(*ri), readlen);
+		return -EIO;
+	}
+	crc = crc32(0, ri, sizeof(*ri)-8);
+
+	D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
+		  ref_offset(fd->raw), je32_to_cpu(ri->node_crc),
+		  crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize),
+		  je32_to_cpu(ri->offset), buf));
+	if (crc != je32_to_cpu(ri->node_crc)) {
+		printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n",
+		       je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
+		ret = -EIO;
+		goto out_ri;
+	}
+	/* There was a bug where we wrote hole nodes out with csize/dsize
+	   swapped. Deal with it */
+	if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && 
+	    je32_to_cpu(ri->csize)) {
+		ri->dsize = ri->csize;
+		ri->csize = cpu_to_je32(0);
+	}
+
+	D1(if(ofs + len > je32_to_cpu(ri->dsize)) {
+		printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
+		       len, ofs, je32_to_cpu(ri->dsize));
+		ret = -EINVAL;
+		goto out_ri;
+	});
+
+	
+	if (ri->compr == JFFS2_COMPR_ZERO) {
+		memset(buf, 0, len);
+		goto out_ri;
+	}
+
+	/* Cases:
+	   Reading whole node and it's uncompressed - read directly to buffer provided, check CRC.
+	   Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided 
+	   Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy 
+	   Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy
+	*/
+	if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) {
+		readbuf = buf;
+	} else {
+		readbuf = kmalloc(je32_to_cpu(ri->csize), GFP_KERNEL);
+		if (!readbuf) {
+			ret = -ENOMEM;
+			goto out_ri;
+		}
+	}
+	if (ri->compr != JFFS2_COMPR_NONE) {
+		if (len < je32_to_cpu(ri->dsize)) {
+			decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL);
+			if (!decomprbuf) {
+				ret = -ENOMEM;
+				goto out_readbuf;
+			}
+		} else {
+			decomprbuf = buf;
+		}
+	} else {
+		decomprbuf = readbuf;
+	}
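+	/* At this point readbuf may alias buf (whole-node uncompressed read),
+	 * and decomprbuf may alias buf or readbuf; the out_* labels below only
+	 * kfree() the buffers which were separately allocated. */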
+
+	D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
+		  readbuf));
+	ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri),
+			       je32_to_cpu(ri->csize), &readlen, readbuf);
+
+	if (!ret && readlen != je32_to_cpu(ri->csize))
+		ret = -EIO;
+	if (ret)
+		goto out_decomprbuf;
+
+	crc = crc32(0, readbuf, je32_to_cpu(ri->csize));
+	if (crc != je32_to_cpu(ri->data_crc)) {
+		printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n",
+		       je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
+		ret = -EIO;
+		goto out_decomprbuf;
+	}
+	D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc));
+	if (ri->compr != JFFS2_COMPR_NONE) {
+		D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n",
+			  je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); 
+		ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
+		if (ret) {
+			printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret);
+			goto out_decomprbuf;
+		}
+	}
+
+	if (len < je32_to_cpu(ri->dsize)) {
+		memcpy(buf, decomprbuf+ofs, len);
+	}
+ out_decomprbuf:
+	if(decomprbuf != buf && decomprbuf != readbuf)
+		kfree(decomprbuf);
+ out_readbuf:
+	if(readbuf != buf)
+		kfree(readbuf);
+ out_ri:
+	jffs2_free_raw_inode(ri);
+
+	return ret;
+}
+
+int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			   unsigned char *buf, uint32_t offset, uint32_t len)
+{
+	uint32_t end = offset + len;
+	struct jffs2_node_frag *frag;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n",
+		  f->inocache->ino, offset, offset+len));
+
+	frag = jffs2_lookup_node_frag(&f->fragtree, offset);
+
+	/* XXX FIXME: Where a single physical node actually shows up in two
+	   frags, we read it twice. Don't do that. */
+	/* Now we're pointing at the first frag which overlaps our page */
+	while(offset < end) {
+		D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
+		if (unlikely(!frag || frag->ofs > offset)) {
+			uint32_t holesize = end - offset;
+			if (frag) {
+				D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
+				holesize = min(holesize, frag->ofs - offset);
+				D2(jffs2_print_frag_list(f));
+			}
+			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
+			memset(buf, 0, holesize);
+			buf += holesize;
+			offset += holesize;
+			continue;
+		} else if (unlikely(!frag->node)) {
+			uint32_t holeend = min(end, frag->ofs + frag->size);
+			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
+			memset(buf, 0, holeend - offset);
+			buf += holeend - offset;
+			offset = holeend;
+			frag = frag_next(frag);
+			continue;
+		} else {
+			uint32_t readlen;
+			uint32_t fragofs; /* offset within the frag to start reading */
+			
+			fragofs = offset - frag->ofs;
+			readlen = min(frag->size - fragofs, end - offset);
+			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n",
+				  frag->ofs+fragofs, frag->ofs+fragofs+readlen,
+				  ref_offset(frag->node->raw), ref_flags(frag->node->raw)));
+			ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
+			D2(printk(KERN_DEBUG "node read done\n"));
+			if (ret) {
+				D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret));
+				memset(buf, 0, readlen);
+				return ret;
+			}
+			buf += readlen;
+			offset += readlen;
+			frag = frag_next(frag);
+			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
+		}
+	}
+	return 0;
+}
+
+/* Core function to read symlink target. */
+char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
+{
+	char *buf;
+	int ret;
+
+	down(&f->sem);
+
+	if (!f->metadata) {
+		printk(KERN_NOTICE "No metadata for symlink inode #%u\n", f->inocache->ino);
+		up(&f->sem);
+		return ERR_PTR(-EINVAL);
+	}
+	buf = kmalloc(f->metadata->size+1, GFP_USER);
+	if (!buf) {
+		up(&f->sem);
+		return ERR_PTR(-ENOMEM);
+	}
+	buf[f->metadata->size]=0;
+
+	ret = jffs2_read_dnode(c, f, f->metadata, buf, 0, f->metadata->size);
+
+	up(&f->sem);
+
+	if (ret) {
+		kfree(buf);
+		return ERR_PTR(ret);
+	}
+	return buf;
+}
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
new file mode 100644
index 000000000000..aca4a0b17bcd
--- /dev/null
+++ b/fs/jffs2/readinode.c
@@ -0,0 +1,695 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: readinode.c,v 1.117 2004/11/20 18:06:54 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/crc32.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
+#include "nodelist.h"
+
+static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag);
+
+#if CONFIG_JFFS2_FS_DEBUG >= 2
+static void jffs2_print_fragtree(struct rb_root *list, int permitbug)
+{
+	struct jffs2_node_frag *this = frag_first(list);
+	uint32_t lastofs = 0;
+	int buggy = 0;
+
+	while(this) {
+		if (this->node)
+			printk(KERN_DEBUG "frag %04x-%04x: 0x%08x(%d) on flash (*%p). left (%p), right (%p), parent (%p)\n",
+			       this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw),
+			       this, frag_left(this), frag_right(this), frag_parent(this));
+		else 
+			printk(KERN_DEBUG "frag %04x-%04x: hole (*%p). left (%p} right (%p), parent (%p)\n", this->ofs, 
+			       this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this));
+		if (this->ofs != lastofs)
+			buggy = 1;
+		lastofs = this->ofs+this->size;
+		this = frag_next(this);
+	}
+	if (buggy && !permitbug) {
+		printk(KERN_CRIT "Frag tree got a hole in it\n");
+		BUG();
+	}
+}
+
+void jffs2_print_frag_list(struct jffs2_inode_info *f)
+{
+	jffs2_print_fragtree(&f->fragtree, 0);
+
+	if (f->metadata) {
+		printk(KERN_DEBUG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));
+	}
+}
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG >= 1
+static int jffs2_sanitycheck_fragtree(struct jffs2_inode_info *f)
+{
+	struct jffs2_node_frag *frag;
+	int bitched = 0;
+
+	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
+
+		struct jffs2_full_dnode *fn = frag->node;
+		if (!fn || !fn->raw)
+			continue;
+
+		if (ref_flags(fn->raw) == REF_PRISTINE) {
+
+			if (fn->frags > 1) {
+				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(fn->raw), fn->frags);
+				bitched = 1;
+			}
+			/* A hole node which isn't multi-page should be garbage-collected
+			   and merged anyway, so we just check for the frag size here,
+			   rather than mucking around with actually reading the node
+			   and checking the compression type, which is the real way
+			   to tell a hole node. */
+			if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n",
+				       ref_offset(fn->raw));
+				bitched = 1;
+			}
+
+			if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+				printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n",
+				       ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
+				bitched = 1;
+			}
+		}
+	}
+	
+	if (bitched) {
+		struct jffs2_node_frag *thisfrag;
+
+		printk(KERN_WARNING "Inode is #%u\n", f->inocache->ino);
+		thisfrag = frag_first(&f->fragtree);
+		while (thisfrag) {
+			if (!thisfrag->node) {
+				printk("Frag @0x%x-0x%x; node-less hole\n",
+				       thisfrag->ofs, thisfrag->size + thisfrag->ofs);
+			} else if (!thisfrag->node->raw) {
+				printk("Frag @0x%x-0x%x; raw-less hole\n",
+				       thisfrag->ofs, thisfrag->size + thisfrag->ofs);
+			} else {
+				printk("Frag @0x%x-0x%x; raw at 0x%08x(%d) (0x%x-0x%x)\n",
+				       thisfrag->ofs, thisfrag->size + thisfrag->ofs,
+				       ref_offset(thisfrag->node->raw), ref_flags(thisfrag->node->raw),
+				       thisfrag->node->ofs, thisfrag->node->ofs+thisfrag->node->size);
+			}
+			thisfrag = frag_next(thisfrag);
+		}
+	}
+	return bitched;
+}
+#endif /* D1 */
+
+static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
+{
+	if (this->node) {
+		this->node->frags--;
+		if (!this->node->frags) {
+			/* The node has no valid frags left. It's totally obsoleted */
+			D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n",
+				  ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size));
+			jffs2_mark_node_obsolete(c, this->node->raw);
+			jffs2_free_full_dnode(this->node);
+		} else {
+			D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n",
+				  ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size,
+				  this->node->frags));
+			mark_ref_normal(this->node->raw);
+		}
+		
+	}
+	jffs2_free_node_frag(this);
+}
+
+/* Given an inode, probably with an existing list of fragments, add the new
+ * node to the fragment list.
+ */
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
+{
+	int ret;
+	struct jffs2_node_frag *newfrag;
+
+	D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn));
+
+	newfrag = jffs2_alloc_node_frag();
+	if (unlikely(!newfrag))
+		return -ENOMEM;
+
+	D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n",
+		  fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag));
+	
+	if (unlikely(!fn->size)) {
+		jffs2_free_node_frag(newfrag);
+		return 0;
+	}
+
+	newfrag->ofs = fn->ofs;
+	newfrag->size = fn->size;
+	newfrag->node = fn;
+	newfrag->node->frags = 1;
+
+	ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag);
+	if (ret)
+		return ret;
+
+	/* If we now share a page with other nodes, mark either previous
+	   or next node REF_NORMAL, as appropriate.  */
+	if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+		struct jffs2_node_frag *prev = frag_prev(newfrag);
+
+		mark_ref_normal(fn->raw);
+		/* If we don't start at zero there's _always_ a previous */	
+		if (prev->node)
+			mark_ref_normal(prev->node->raw);
+	}
+
+	if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+		struct jffs2_node_frag *next = frag_next(newfrag);
+		
+		if (next) {
+			mark_ref_normal(fn->raw);
+			if (next->node)
+				mark_ref_normal(next->node->raw);
+		}
+	}
+	D2(if (jffs2_sanitycheck_fragtree(f)) {
+		   printk(KERN_WARNING "Just added node %04x-%04x @0x%08x on flash, newfrag *%p\n",
+			  fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);
+		   return 0;
+	   })
+	D2(jffs2_print_frag_list(f));
+	return 0;
+}
+
+/* Doesn't set inode->i_size */
+static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag)
+{
+	struct jffs2_node_frag *this;
+	uint32_t lastend;
+
+	/* Skip all the nodes which are completed before this one starts */
+	this = jffs2_lookup_node_frag(list, newfrag->node->ofs);
+
+	if (this) {
+		D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
+			  this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this));
+		lastend = this->ofs + this->size;
+	} else {
+		D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n"));
+		lastend = 0;
+	}
+			  
+	/* See if we ran off the end of the list */
+	if (lastend <= newfrag->ofs) {
+		/* We did */
+
+		/* Check if 'this' node was on the same page as the new node.
+		   If so, both 'this' and the new node get marked REF_NORMAL so
+		   the GC can take a look.
+		*/
+		if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+			if (this->node)
+				mark_ref_normal(this->node->raw);
+			mark_ref_normal(newfrag->node->raw);
+		}
+
+		if (lastend < newfrag->node->ofs) {
+			/* ... and we need to put a hole in before the new node */
+			struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag();
+			if (!holefrag) {
+				jffs2_free_node_frag(newfrag);
+				return -ENOMEM;
+			}
+			holefrag->ofs = lastend;
+			holefrag->size = newfrag->node->ofs - lastend;
+			holefrag->node = NULL;
+			if (this) {
+				/* By definition, the 'this' node has no right-hand child, 
+				   because there are no frags with offset greater than it.
+				   So that's where we want to put the hole */
+				D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this));
+				rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
+			} else {
+				D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag));
+				rb_link_node(&holefrag->rb, NULL, &list->rb_node);
+			}
+			rb_insert_color(&holefrag->rb, list);
+			this = holefrag;
+		}
+		if (this) {
+			/* By definition, the 'this' node has no right-hand child, 
+			   because there are no frags with offset greater than it.
+			   So that's where we want to put the new frag */
+			D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this));
+			rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);			
+		} else {
+			D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag));
+			rb_link_node(&newfrag->rb, NULL, &list->rb_node);
+		}
+		rb_insert_color(&newfrag->rb, list);
+		return 0;
+	}
+
+	D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", 
+		  this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this));
+
+	/* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes,
+	 * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs  
+	 */
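+	/* Illustrative only (hypothetical offsets): if 'this' covers
+	 * 0x0000-0x1000 and newfrag covers 0x0800-0x0c00, the code below trims
+	 * 'this' to 0x0000-0x0800 and creates a second frag for 0x0c00-0x1000,
+	 * with newfrag slotted in between. */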
+	if (newfrag->ofs > this->ofs) {
+		/* This node isn't completely obsoleted. The start of it remains valid */
+
+		/* Mark the new node and the partially covered node REF_NORMAL -- let
+		   the GC take a look at them */
+		mark_ref_normal(newfrag->node->raw);
+		if (this->node)
+			mark_ref_normal(this->node->raw);
+
+		if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
+			/* The new node splits 'this' frag into two */
+			struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag();
+			if (!newfrag2) {
+				jffs2_free_node_frag(newfrag);
+				return -ENOMEM;
+			}
+			D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size);
+			if (this->node)
+				printk("phys 0x%08x\n", ref_offset(this->node->raw));
+			else 
+				printk("hole\n");
+			   )
+			
+			/* New second frag pointing to this's node */
+			newfrag2->ofs = newfrag->ofs + newfrag->size;
+			newfrag2->size = (this->ofs+this->size) - newfrag2->ofs;
+			newfrag2->node = this->node;
+			if (this->node)
+				this->node->frags++;
+
+			/* Adjust size of original 'this' */
+			this->size = newfrag->ofs - this->ofs;
+
+			/* Now, we know there's no node with offset
+			   greater than this->ofs but smaller than
+			   newfrag2->ofs or newfrag->ofs, for obvious
+			   reasons. So we can do a tree insert from
+			   'this' to insert newfrag, and a tree insert
+			   from newfrag to insert newfrag2. */
+			jffs2_fragtree_insert(newfrag, this);
+			rb_insert_color(&newfrag->rb, list);
+			
+			jffs2_fragtree_insert(newfrag2, newfrag);
+			rb_insert_color(&newfrag2->rb, list);
+			
+			return 0;
+		}
+		/* New node just reduces 'this' frag in size, doesn't split it */
+		this->size = newfrag->ofs - this->ofs;
+
+		/* Again, we know it lives down here in the tree */
+		jffs2_fragtree_insert(newfrag, this);
+		rb_insert_color(&newfrag->rb, list);
+	} else {
+		/* New frag starts at the same point as 'this' used to. Replace 
+		   it in the tree without doing a delete and insertion */
+		D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n",
+			  newfrag, newfrag->ofs, newfrag->ofs+newfrag->size,
+			  this, this->ofs, this->ofs+this->size));
+	
+		rb_replace_node(&this->rb, &newfrag->rb, list);
+		
+		if (newfrag->ofs + newfrag->size >= this->ofs+this->size) {
+			D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size));
+			jffs2_obsolete_node_frag(c, this);
+		} else {
+			this->ofs += newfrag->size;
+			this->size -= newfrag->size;
+
+			jffs2_fragtree_insert(this, newfrag);
+			rb_insert_color(&this->rb, list);
+			return 0;
+		}
+	}
+	/* OK, now we have newfrag added in the correct place in the tree, but
+	   frag_next(newfrag) may be a fragment which is overlapped by it 
+	*/
+	while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
+		/* 'this' frag is obsoleted completely. */
+		D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size));
+		rb_erase(&this->rb, list);
+		jffs2_obsolete_node_frag(c, this);
+	}
+	/* Now we're pointing at the first frag which isn't totally obsoleted by 
+	   the new frag */
+
+	if (!this || newfrag->ofs + newfrag->size == this->ofs) {
+		return 0;
+	}
+	/* Still some overlap but we don't need to move it in the tree */
+	this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
+	this->ofs = newfrag->ofs + newfrag->size;
+
+	/* And mark them REF_NORMAL so the GC takes a look at them */
+	if (this->node)
+		mark_ref_normal(this->node->raw);
+	mark_ref_normal(newfrag->node->raw);
+
+	return 0;
+}
+
+void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
+{
+	struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);
+
+	D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size));
+
+	/* We know frag->ofs <= size. That's what lookup does for us */
+	if (frag && frag->ofs != size) {
+		if (frag->ofs+frag->size >= size) {
+			D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size));
+			frag->size = size - frag->ofs;
+		}
+		frag = frag_next(frag);
+	}
+	while (frag && frag->ofs >= size) {
+		struct jffs2_node_frag *next = frag_next(frag);
+
+		D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size));
+		frag_erase(frag, list);
+		jffs2_obsolete_node_frag(c, frag);
+		frag = next;
+	}
+}
+
+/* Scan the list of all nodes present for this ino, build map of versions, etc. */
+
+static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, 
+					struct jffs2_inode_info *f,
+					struct jffs2_raw_inode *latest_node);
+
+int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, 
+			uint32_t ino, struct jffs2_raw_inode *latest_node)
+{
+	D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n"));
+
+ retry_inocache:
+	spin_lock(&c->inocache_lock);
+	f->inocache = jffs2_get_ino_cache(c, ino);
+
+	D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache));
+
+	if (f->inocache) {
+		/* Check its state. We may need to wait before we can use it */
+		switch(f->inocache->state) {
+		case INO_STATE_UNCHECKED:
+		case INO_STATE_CHECKEDABSENT:
+			f->inocache->state = INO_STATE_READING;
+			break;
+			
+		case INO_STATE_CHECKING:
+		case INO_STATE_GC:
+			/* If it's in either of these states, we need
+			   to wait for whoever's got it to finish and
+			   put it back. */
+			D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n",
+				  ino, f->inocache->state));
+			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+			goto retry_inocache;
+
+		case INO_STATE_READING:
+		case INO_STATE_PRESENT:
+			/* Eep. This should never happen. It can
+			happen if Linux calls read_inode() again
+			before clear_inode() has finished though. */
+			printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
+			/* Fail. That's probably better than allowing it to succeed */
+			f->inocache = NULL;
+			break;
+
+		default:
+			BUG();
+		}
+	}
+	spin_unlock(&c->inocache_lock);
+
+	if (!f->inocache && ino == 1) {
+		/* Special case - no root inode on medium */
+		f->inocache = jffs2_alloc_inode_cache();
+		if (!f->inocache) {
+			printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n");
+			return -ENOMEM;
+		}
+		D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n"));
+		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
+		f->inocache->ino = f->inocache->nlink = 1;
+		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
+		f->inocache->state = INO_STATE_READING;
+		jffs2_add_ino_cache(c, f->inocache);
+	}
+	if (!f->inocache) {
+		printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino);
+		return -ENOENT;
+	}
+
+	return jffs2_do_read_inode_internal(c, f, latest_node);
+}
+
+int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+	struct jffs2_raw_inode n;
+	struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
+	int ret;
+
+	if (!f)
+		return -ENOMEM;
+
+	memset(f, 0, sizeof(*f));
+	init_MUTEX_LOCKED(&f->sem);
+	f->inocache = ic;
+
+	ret = jffs2_do_read_inode_internal(c, f, &n);
+	if (!ret) {
+		up(&f->sem);
+		jffs2_do_clear_inode(c, f);
+	}
+	kfree (f);
+	return ret;
+}
+
+static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, 
+					struct jffs2_inode_info *f,
+					struct jffs2_raw_inode *latest_node)
+{
+	struct jffs2_tmp_dnode_info *tn_list, *tn;
+	struct jffs2_full_dirent *fd_list;
+	struct jffs2_full_dnode *fn = NULL;
+	uint32_t crc;
+	uint32_t latest_mctime, mctime_ver;
+	uint32_t mdata_ver = 0;
+	size_t retlen;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_do_read_inode_internal(): ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink));
+
+	/* Grab all nodes relevant to this ino */
+	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);
+
+	if (ret) {
+		printk(KERN_CRIT "jffs2_get_inode_nodes() for ino %u returned %d\n", f->inocache->ino, ret);
+		if (f->inocache->state == INO_STATE_READING)
+			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+		return ret;
+	}
+	f->dents = fd_list;
+
+	while (tn_list) {
+		tn = tn_list;
+
+		fn = tn->fn;
+
+		if (f->metadata) {
+			if (likely(tn->version >= mdata_ver)) {
+				D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw)));
+				jffs2_mark_node_obsolete(c, f->metadata->raw);
+				jffs2_free_full_dnode(f->metadata);
+				f->metadata = NULL;
+				
+				mdata_ver = 0;
+			} else {
+				/* This should never happen. */
+				printk(KERN_WARNING "Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n",
+					  ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw));
+				jffs2_mark_node_obsolete(c, fn->raw);
+				jffs2_free_full_dnode(fn);
+				/* Fill in latest_node from the metadata, not this one we're about to free... */
+				fn = f->metadata;
+				goto next_tn;
+			}
+		}
+
+		if (fn->size) {
+			jffs2_add_full_dnode_to_inode(c, f, fn);
+		} else {
+			/* Zero-sized node at end of version list. Just a metadata update */
+			D1(printk(KERN_DEBUG "metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version));
+			f->metadata = fn;
+			mdata_ver = tn->version;
+		}
+	next_tn:
+		tn_list = tn->next;
+		jffs2_free_tmp_dnode_info(tn);
+	}
+	D1(jffs2_sanitycheck_fragtree(f));
+
+	if (!fn) {
+		/* No data nodes for this inode. */
+		if (f->inocache->ino != 1) {
+			printk(KERN_WARNING "jffs2_do_read_inode(): No data nodes found for ino #%u\n", f->inocache->ino);
+			if (!fd_list) {
+				if (f->inocache->state == INO_STATE_READING)
+					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+				return -EIO;
+			}
+			printk(KERN_WARNING "jffs2_do_read_inode(): But it has children so we fake some modes for it\n");
+		}
+		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
+		latest_node->version = cpu_to_je32(0);
+		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
+		latest_node->isize = cpu_to_je32(0);
+		latest_node->gid = cpu_to_je16(0);
+		latest_node->uid = cpu_to_je16(0);
+		if (f->inocache->state == INO_STATE_READING)
+			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
+		return 0;
+	}
+
+	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
+	if (ret || retlen != sizeof(*latest_node)) {
+		printk(KERN_NOTICE "MTD read in jffs2_do_read_inode() failed: Returned %d, %zd of %zd bytes read\n",
+		       ret, retlen, sizeof(*latest_node));
+		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
+		up(&f->sem);
+		jffs2_do_clear_inode(c, f);
+		return ret?ret:-EIO;
+	}
+
+	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
+	if (crc != je32_to_cpu(latest_node->node_crc)) {
+		printk(KERN_NOTICE "CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(fn->raw));
+		up(&f->sem);
+		jffs2_do_clear_inode(c, f);
+		return -EIO;
+	}
+
+	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
+	case S_IFDIR:
+		if (mctime_ver > je32_to_cpu(latest_node->version)) {
+			/* The times in the latest_node are actually older than
+			   mctime in the latest dirent. Cheat. */
+			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
+		}
+		break;
+
+			
+	case S_IFREG:
+		/* If it was a regular file, truncate it to the latest node's isize */
+		jffs2_truncate_fraglist(c, &f->fragtree, je32_to_cpu(latest_node->isize));
+		break;
+
+	case S_IFLNK:
+		/* Hack to work around broken isize in old symlink code.
+		   Remove this when dwmw2 comes to his senses and stops
+		   symlinks from being an entirely gratuitous special
+		   case. */
+		if (!je32_to_cpu(latest_node->isize))
+			latest_node->isize = latest_node->dsize;
+		/* fall through... */
+
+	case S_IFBLK:
+	case S_IFCHR:
+		/* Certain inode types should have only one data node, and it's
+		   kept as the metadata node */
+		if (f->metadata) {
+			printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o had metadata node\n",
+			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
+			up(&f->sem);
+			jffs2_do_clear_inode(c, f);
+			return -EIO;
+		}
+		if (!frag_first(&f->fragtree)) {
+			printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o has no fragments\n",
+			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
+			up(&f->sem);
+			jffs2_do_clear_inode(c, f);
+			return -EIO;
+		}
+		/* ASSERT: frag_first(&f->fragtree) != NULL */
+		if (frag_next(frag_first(&f->fragtree))) {
+			printk(KERN_WARNING "Argh. Special inode #%u with mode 0x%x had more than one node\n",
+			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
+			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
+			up(&f->sem);
+			jffs2_do_clear_inode(c, f);
+			return -EIO;
+		}
+		/* OK. We're happy */
+		f->metadata = frag_first(&f->fragtree)->node;
+		jffs2_free_node_frag(frag_first(&f->fragtree));
+		f->fragtree = RB_ROOT;
+		break;
+	}
+	if (f->inocache->state == INO_STATE_READING)
+		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
+
+	return 0;
+}
+
+void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
+{
+	struct jffs2_full_dirent *fd, *fds;
+	int deleted;
+
+	down(&f->sem);
+	deleted = f->inocache && !f->inocache->nlink;
+
+	if (f->metadata) {
+		if (deleted)
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+		jffs2_free_full_dnode(f->metadata);
+	}
+
+	jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
+
+	fds = f->dents;
+
+	while(fds) {
+		fd = fds;
+		fds = fd->next;
+		jffs2_free_full_dirent(fd);
+	}
+
+	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
+		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+
+	up(&f->sem);
+}
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
new file mode 100644
index 000000000000..ded53584a897
--- /dev/null
+++ b/fs/jffs2/scan.c
@@ -0,0 +1,916 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: scan.c,v 1.115 2004/11/17 12:59:08 dedekind Exp $
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/compiler.h>
+#include "nodelist.h"
+
+#define EMPTY_SCAN_SIZE 1024
+
+#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
+		c->free_size -= _x; c->dirty_size += _x; \
+		jeb->free_size -= _x ; jeb->dirty_size += _x; \
+		}while(0)
+#define USED_SPACE(x) do { typeof(x) _x = (x); \
+		c->free_size -= _x; c->used_size += _x; \
+		jeb->free_size -= _x ; jeb->used_size += _x; \
+		}while(0)
+#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
+		c->free_size -= _x; c->unchecked_size += _x; \
+		jeb->free_size -= _x ; jeb->unchecked_size += _x; \
+		}while(0)
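+/* Each macro above moves _x bytes of free space into the named category,
+ * updating both the filesystem totals in 'c' and the eraseblock being
+ * scanned; they assume 'c' and 'jeb' are in scope at the call site. */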
+
+#define noisy_printk(noise, args...) do { \
+	if (*(noise)) { \
+		printk(KERN_NOTICE args); \
+		 (*(noise))--; \
+		 if (!(*(noise))) { \
+			 printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
+		 } \
+	} \
+} while(0)
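+/* noisy_printk() is a per-call-site rate limit: it prints while *(noise) is
+ * non-zero, decrements the counter, and announces once when further messages
+ * for this erase block are being suppressed. */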
+
+static uint32_t pseudo_random;
+
+static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				  unsigned char *buf, uint32_t buf_size);
+
+/* These helper functions _must_ increase ofs and also do the dirty/used space accounting. 
+ * Returning an error will abort the mount - bad checksums etc. should just mark the space
+ * as dirty.
+ */
+static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+				 struct jffs2_raw_inode *ri, uint32_t ofs);
+static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				 struct jffs2_raw_dirent *rd, uint32_t ofs);
+
+#define BLK_STATE_ALLFF		0
+#define BLK_STATE_CLEAN		1
+#define BLK_STATE_PARTDIRTY	2
+#define BLK_STATE_CLEANMARKER	3
+#define BLK_STATE_ALLDIRTY	4
+#define BLK_STATE_BADBLOCK	5
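+/* Block states returned by jffs2_scan_eraseblock(): ALLFF means the block
+ * read as all 0xFF; CLEAN means full (or nearly full) of clean data;
+ * PARTDIRTY means some data but free space left; CLEANMARKER means only a
+ * cleanmarker node was found; ALLDIRTY means nothing valid was found; and
+ * BADBLOCK marks a bad block (see the switch in jffs2_scan_medium() below). */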
+
+static inline int min_free(struct jffs2_sb_info *c)
+{
+	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
+#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
+	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
+		return c->wbuf_pagesize;
+#endif
+	return min;
+}
+
+int jffs2_scan_medium(struct jffs2_sb_info *c)
+{
+	int i, ret;
+	uint32_t empty_blocks = 0, bad_blocks = 0;
+	unsigned char *flashbuf = NULL;
+	uint32_t buf_size = 0;
+#ifndef __ECOS
+	size_t pointlen;
+
+	if (c->mtd->point) {
+		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
+		if (!ret && pointlen < c->mtd->size) {
+			/* Don't muck about if it won't let us point to the whole flash */
+			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
+			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
+			flashbuf = NULL;
+		}
+		if (ret)
+			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
+	}
+#endif
+	if (!flashbuf) {
+		/* For NAND it's quicker to read a whole eraseblock at a time,
+		   apparently */
+		if (jffs2_cleanmarker_oob(c))
+			buf_size = c->sector_size;
+		else
+			buf_size = PAGE_SIZE;
+
+		/* Respect kmalloc limitations */
+		if (buf_size > 128*1024)
+			buf_size = 128*1024;
+
+		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
+		flashbuf = kmalloc(buf_size, GFP_KERNEL);
+		if (!flashbuf)
+			return -ENOMEM;
+	}
+
+	for (i=0; i<c->nr_blocks; i++) {
+		struct jffs2_eraseblock *jeb = &c->blocks[i];
+
+		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);
+
+		if (ret < 0)
+			goto out;
+
+		ACCT_PARANOIA_CHECK(jeb);
+
+		/* Now decide which list to put it on */
+		switch(ret) {
+		case BLK_STATE_ALLFF:
+			/* 
+			 * Empty block.   Since we can't be sure it 
+			 * was entirely erased, we just queue it for erase
+			 * again.  It will be marked as such when the erase
+			 * is complete.  Meanwhile we still count it as empty
+			 * for later checks.
+			 */
+			empty_blocks++;
+			list_add(&jeb->list, &c->erase_pending_list);
+			c->nr_erasing_blocks++;
+			break;
+
+		case BLK_STATE_CLEANMARKER:
+			/* Only a CLEANMARKER node is valid */
+			if (!jeb->dirty_size) {
+				/* It's actually free */
+				list_add(&jeb->list, &c->free_list);
+				c->nr_free_blocks++;
+			} else {
+				/* Dirt */
+				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
+				list_add(&jeb->list, &c->erase_pending_list);
+				c->nr_erasing_blocks++;
+			}
+			break;
+
+		case BLK_STATE_CLEAN:
+                        /* Full (or almost full) of clean data. Clean list */
+                        list_add(&jeb->list, &c->clean_list);
+			break;
+
+		case BLK_STATE_PARTDIRTY:
+                        /* Some data, but not full. Dirty list. */
+                        /* We want to remember the block with most free space
+                           and stick it in the 'nextblock' position to start writing to it. */
+                        if (jeb->free_size > min_free(c) && 
+			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
+                                /* Better candidate for the next writes to go to */
+                                if (c->nextblock) {
+					c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
+					c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
+					c->free_size -= c->nextblock->free_size;
+					c->wasted_size -= c->nextblock->wasted_size;
+					c->nextblock->free_size = c->nextblock->wasted_size = 0;
+					if (VERYDIRTY(c, c->nextblock->dirty_size)) {
+						list_add(&c->nextblock->list, &c->very_dirty_list);
+					} else {
+						list_add(&c->nextblock->list, &c->dirty_list);
+					}
+				}
+                                c->nextblock = jeb;
+                        } else {
+				jeb->dirty_size += jeb->free_size + jeb->wasted_size;
+				c->dirty_size += jeb->free_size + jeb->wasted_size;
+				c->free_size -= jeb->free_size;
+				c->wasted_size -= jeb->wasted_size;
+				jeb->free_size = jeb->wasted_size = 0;
+				if (VERYDIRTY(c, jeb->dirty_size)) {
+					list_add(&jeb->list, &c->very_dirty_list);
+				} else {
+					list_add(&jeb->list, &c->dirty_list);
+				}
+                        }
+			break;
+
+		case BLK_STATE_ALLDIRTY:
+			/* Nothing valid - not even a clean marker. Needs erasing. */
+                        /* For now we just put it on the erasing list. We'll start the erases later */
+			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
+                        list_add(&jeb->list, &c->erase_pending_list);
+			c->nr_erasing_blocks++;
+			break;
+			
+		case BLK_STATE_BADBLOCK:
+			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
+                        list_add(&jeb->list, &c->bad_list);
+			c->bad_size += c->sector_size;
+			c->free_size -= c->sector_size;
+			bad_blocks++;
+			break;
+		default:
+			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
+			BUG();	
+		}
+	}
+	
+	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
+	if (c->nextblock && (c->nextblock->dirty_size)) {
+		c->nextblock->wasted_size += c->nextblock->dirty_size;
+		c->wasted_size += c->nextblock->dirty_size;
+		c->dirty_size -= c->nextblock->dirty_size;
+		c->nextblock->dirty_size = 0;
+	}
+#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
+	if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
+		/* If we're going to start writing into a block which already 
+		   contains data, and the end of the data isn't page-aligned,
+		   skip a little and align it. */
+
+		uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);
+
+		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
+			  skip));
+		c->nextblock->wasted_size += skip;
+		c->wasted_size += skip;
+
+		c->nextblock->free_size -= skip;
+		c->free_size -= skip;
+	}
+#endif
+	if (c->nr_erasing_blocks) {
+		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { 
+			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
+			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
+			ret = -EIO;
+			goto out;
+		}
+		jffs2_erase_pending_trigger(c);
+	}
+	ret = 0;
+ out:
+	if (buf_size)
+		kfree(flashbuf);
+#ifndef __ECOS
+	else 
+		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
+#endif
+	return ret;
+}
+
+static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf,
+				uint32_t ofs, uint32_t len)
+{
+	int ret;
+	size_t retlen;
+
+	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
+	if (ret) {
+		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
+		return ret;
+	}
+	if (retlen < len) {
+		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
+		return -EIO;
+	}
+	D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs));
+	D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+		  buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]));
+	return 0;
+}
+
+static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+				  unsigned char *buf, uint32_t buf_size) {
+	struct jffs2_unknown_node *node;
+	struct jffs2_unknown_node crcnode;
+	uint32_t ofs, prevofs;
+	uint32_t hdr_crc, buf_ofs, buf_len;
+	int err;
+	int noise = 0;
+#ifdef CONFIG_JFFS2_FS_NAND
+	int cleanmarkerfound = 0;
+#endif
+
+	ofs = jeb->offset;
+	prevofs = jeb->offset - 1;
+
+	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));
+
+#ifdef CONFIG_JFFS2_FS_NAND
+	if (jffs2_cleanmarker_oob(c)) {
+		int ret = jffs2_check_nand_cleanmarker(c, jeb);
+		D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret));
+		/* Even if it's not found, we still scan to see
+		   if the block is empty. We use this information
+		   to decide whether to erase it or not. */
+		switch (ret) {
+		case 0:		cleanmarkerfound = 1; break;
+		case 1: 	break;
+		case 2: 	return BLK_STATE_BADBLOCK;
+		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
+		default: 	return ret;
+		}
+	}
+#endif
+	buf_ofs = jeb->offset;
+
+	if (!buf_size) {
+		buf_len = c->sector_size;
+	} else {
+		buf_len = EMPTY_SCAN_SIZE;
+		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
+		if (err)
+			return err;
+	}
+	
+	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+	ofs = 0;
+
+	/* Scan only the first EMPTY_SCAN_SIZE bytes of 0xFF before declaring the block empty */
+	while(ofs < EMPTY_SCAN_SIZE && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
+		ofs += 4;
+
+	if (ofs == EMPTY_SCAN_SIZE) {
+#ifdef CONFIG_JFFS2_FS_NAND
+		if (jffs2_cleanmarker_oob(c)) {
+			/* scan oob, take care of cleanmarker */
+			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
+			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
+			switch (ret) {
+			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
+			case 1: 	return BLK_STATE_ALLDIRTY;
+			default: 	return ret;
+			}
+		}
+#endif
+		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
+		return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
+	}
+	if (ofs) {
+		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
+			  jeb->offset + ofs));
+		DIRTY_SPACE(ofs);
+	}
+
+	/* Now ofs is a complete physical flash offset as it always was... */
+	ofs += jeb->offset;
+
+	noise = 10;
+
+scan_more:	
+	while(ofs < jeb->offset + c->sector_size) {
+
+		D1(ACCT_PARANOIA_CHECK(jeb));
+
+		cond_resched();
+
+		if (ofs & 3) {
+			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
+			ofs = PAD(ofs);
+			continue;
+		}
+		if (ofs == prevofs) {
+			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		prevofs = ofs;
+
+		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
+			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
+				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
+			DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
+			break;
+		}
+
+		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
+			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
+				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
+			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+			if (err)
+				return err;
+			buf_ofs = ofs;
+		}
+
+		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
+
+		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
+			uint32_t inbuf_ofs;
+			uint32_t empty_start;
+
+			empty_start = ofs;
+			ofs += 4;
+
+			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
+		more_empty:
+			inbuf_ofs = ofs - buf_ofs;
+			while (inbuf_ofs < buf_len) {
+				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
+					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
+					       empty_start, ofs);
+					DIRTY_SPACE(ofs-empty_start);
+					goto scan_more;
+				}
+
+				inbuf_ofs+=4;
+				ofs += 4;
+			}
+			/* Ran off end. */
+			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));
+
+			/* If we're only checking the beginning of a block with a cleanmarker,
+			   bail now */
+			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && 
+			    c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_in_ino) {
+				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE));
+				return BLK_STATE_CLEANMARKER;
+			}
+
+			/* See how much more there is to read in this eraseblock... */
+			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+			if (!buf_len) {
+				/* No more to read. Break out of main loop without marking 
+				   this range of empty space as dirty (because it's not) */
+				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
+					  empty_start));
+				break;
+			}
+			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
+			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+			if (err)
+				return err;
+			buf_ofs = ofs;
+			goto more_empty;
+		}
+
+		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
+			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
+			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
+			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
+			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
+			/* OK. We're out of possibilities. Whinge and move on */
+			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", 
+				     JFFS2_MAGIC_BITMASK, ofs, 
+				     je16_to_cpu(node->magic));
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+		/* We seem to have a node of sorts. Check the CRC */
+		crcnode.magic = node->magic;
+		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
+		crcnode.totlen = node->totlen;
+		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
+
+		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
+			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x (0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
+				     ofs, je16_to_cpu(node->magic),
+				     je16_to_cpu(node->nodetype), 
+				     je32_to_cpu(node->totlen),
+				     je32_to_cpu(node->hdr_crc),
+				     hdr_crc);
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+
+		if (ofs + je32_to_cpu(node->totlen) > 
+		    jeb->offset + c->sector_size) {
+			/* Eep. Node goes over the end of the erase block. */
+			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
+			       ofs, je32_to_cpu(node->totlen));
+			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
+			DIRTY_SPACE(4);
+			ofs += 4;
+			continue;
+		}
+
+		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
+			/* Wheee. This is an obsoleted node */
+			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
+			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+			ofs += PAD(je32_to_cpu(node->totlen));
+			continue;
+		}
+
+		switch(je16_to_cpu(node->nodetype)) {
+		case JFFS2_NODETYPE_INODE:
+			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
+				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
+					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
+				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+				if (err)
+					return err;
+				buf_ofs = ofs;
+				node = (void *)buf;
+			}
+			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
+			if (err) return err;
+			ofs += PAD(je32_to_cpu(node->totlen));
+			break;
+			
+		case JFFS2_NODETYPE_DIRENT:
+			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
+				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
+					  je32_to_cpu(node->totlen), buf_len, ofs));
+				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
+				if (err)
+					return err;
+				buf_ofs = ofs;
+				node = (void *)buf;
+			}
+			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
+			if (err) return err;
+			ofs += PAD(je32_to_cpu(node->totlen));
+			break;
+
+		case JFFS2_NODETYPE_CLEANMARKER:
+			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
+			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
+				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", 
+				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
+				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
+				ofs += PAD(sizeof(struct jffs2_unknown_node));
+			} else if (jeb->first_node) {
+				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
+				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
+				ofs += PAD(sizeof(struct jffs2_unknown_node));
+			} else {
+				struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
+				if (!marker_ref) {
+					printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
+					return -ENOMEM;
+				}
+				marker_ref->next_in_ino = NULL;
+				marker_ref->next_phys = NULL;
+				marker_ref->flash_offset = ofs | REF_NORMAL;
+				marker_ref->__totlen = c->cleanmarker_size;
+				jeb->first_node = jeb->last_node = marker_ref;
+			     
+				USED_SPACE(PAD(c->cleanmarker_size));
+				ofs += PAD(c->cleanmarker_size);
+			}
+			break;
+
+		case JFFS2_NODETYPE_PADDING:
+			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+			ofs += PAD(je32_to_cpu(node->totlen));
+			break;
+
+		default:
+			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
+			case JFFS2_FEATURE_ROCOMPAT:
+				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
+			        c->flags |= JFFS2_SB_FLAG_RO;
+				if (!(jffs2_is_readonly(c)))
+					return -EROFS;
+				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+				ofs += PAD(je32_to_cpu(node->totlen));
+				break;
+
+			case JFFS2_FEATURE_INCOMPAT:
+				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
+				return -EINVAL;
+
+			case JFFS2_FEATURE_RWCOMPAT_DELETE:
+				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+				ofs += PAD(je32_to_cpu(node->totlen));
+				break;
+
+			case JFFS2_FEATURE_RWCOMPAT_COPY:
+				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+				USED_SPACE(PAD(je32_to_cpu(node->totlen)));
+				ofs += PAD(je32_to_cpu(node->totlen));
+				break;
+			}
+		}
+	}
+
+
+	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, 
+		  jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));
+
+	/* mark_node_obsolete can add to wasted !! */
+	if (jeb->wasted_size) {
+		jeb->dirty_size += jeb->wasted_size;
+		c->dirty_size += jeb->wasted_size;
+		c->wasted_size -= jeb->wasted_size;
+		jeb->wasted_size = 0;
+	}
+
+	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size 
+		&& (!jeb->first_node || !jeb->first_node->next_in_ino) )
+		return BLK_STATE_CLEANMARKER;
+		
+	/* move blocks with max 4 byte dirty space to cleanlist */	
+	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
+		c->dirty_size -= jeb->dirty_size;
+		c->wasted_size += jeb->dirty_size; 
+		jeb->wasted_size += jeb->dirty_size;
+		jeb->dirty_size = 0;
+		return BLK_STATE_CLEAN;
+	} else if (jeb->used_size || jeb->unchecked_size)
+		return BLK_STATE_PARTDIRTY;
+	else
+		return BLK_STATE_ALLDIRTY;
+}
+
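For reference, the check above CRCs only the first three header fields of the candidate node, with the ACCURATE bit forced on, and compares the result with the stored hdr_crc. Below is a minimal userspace sketch of that check; the struct layout, the 0x2000 value for JFFS2_NODE_ACCURATE and the CRC routine (bitwise CRC-32, seed passed through, no final inversion, intended to approximate the kernel's crc32(0, ...)) are assumptions of the sketch, and je16/je32 endianness conversion is omitted.

#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for the on-flash node header (assumed layout). */
struct unknown_node_sketch {
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t hdr_crc;
};

/* Bitwise CRC-32 (polynomial 0xEDB88320), seed passed through, no final
 * inversion -- assumed to mirror the kernel's crc32(0, buf, len). */
static uint32_t crc32_sketch(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

/* Nonzero when the stored hdr_crc matches a CRC over the first three
 * header fields with the ACCURATE bit forced on, as the scan above does. */
static int hdr_crc_ok(const struct unknown_node_sketch *node)
{
	struct unknown_node_sketch crcnode = *node;

	crcnode.nodetype |= 0x2000;	/* assumed JFFS2_NODE_ACCURATE */
	return crc32_sketch(0, &crcnode, sizeof(crcnode) - 4) == node->hdr_crc;
}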
+static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
+{
+	struct jffs2_inode_cache *ic;
+
+	ic = jffs2_get_ino_cache(c, ino);
+	if (ic)
+		return ic;
+
+	if (ino > c->highest_ino)
+		c->highest_ino = ino;
+
+	ic = jffs2_alloc_inode_cache();
+	if (!ic) {
+		printk(KERN_NOTICE "jffs2_scan_make_ino_cache(): allocation of inode cache failed\n");
+		return NULL;
+	}
+	memset(ic, 0, sizeof(*ic));
+
+	ic->ino = ino;
+	ic->nodes = (void *)ic;
+	jffs2_add_ino_cache(c, ic);
+	if (ino == 1)
+		ic->nlink = 1;
+	return ic;
+}
+
+static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+				 struct jffs2_raw_inode *ri, uint32_t ofs)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_inode_cache *ic;
+	uint32_t ino = je32_to_cpu(ri->ino);
+
+	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
+
+	/* We do very little here now. Just check the ino# to which we should attribute
+	   this node; we can do all the CRC checking etc. later. There's a tradeoff here -- 
+	   we used to scan the flash once only, reading everything we want from it into
+	   memory, then building all our in-core data structures and freeing the extra
+	   information. Now we allow the first part of the mount to complete a lot quicker,
+	   but we have to go _back_ to the flash in order to finish the CRC checking, etc. 
+	   Which means that the _full_ amount of time to get to proper write mode with GC
+	   operational may actually be _longer_ than before. Sucks to be me. */
+
+	raw = jffs2_alloc_raw_node_ref();
+	if (!raw) {
+		printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
+		return -ENOMEM;
+	}
+
+	ic = jffs2_get_ino_cache(c, ino);
+	if (!ic) {
+		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
+		   first node we found for this inode. Do a CRC check to protect against the former
+		   case */
+		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);
+
+		if (crc != je32_to_cpu(ri->node_crc)) {
+			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+			       ofs, je32_to_cpu(ri->node_crc), crc);
+			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
+			DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
+			jffs2_free_raw_node_ref(raw);
+			return 0;
+		}
+		ic = jffs2_scan_make_ino_cache(c, ino);
+		if (!ic) {
+			jffs2_free_raw_node_ref(raw);
+			return -ENOMEM;
+		}
+	}
+
+	/* Wheee. It worked */
+
+	raw->flash_offset = ofs | REF_UNCHECKED;
+	raw->__totlen = PAD(je32_to_cpu(ri->totlen));
+	raw->next_phys = NULL;
+	raw->next_in_ino = ic->nodes;
+
+	ic->nodes = raw;
+	if (!jeb->first_node)
+		jeb->first_node = raw;
+	if (jeb->last_node)
+		jeb->last_node->next_phys = raw;
+	jeb->last_node = raw;
+
+	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", 
+		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
+		  je32_to_cpu(ri->offset),
+		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));
+
+	pseudo_random += je32_to_cpu(ri->version);
+
+	UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
+	return 0;
+}
+
+static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
+				  struct jffs2_raw_dirent *rd, uint32_t ofs)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dirent *fd;
+	struct jffs2_inode_cache *ic;
+	uint32_t crc;
+
+	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));
+
+	/* We don't get here unless the node is still valid, so we don't have to
+	   mask in the ACCURATE bit any more. */
+	crc = crc32(0, rd, sizeof(*rd)-8);
+
+	if (crc != je32_to_cpu(rd->node_crc)) {
+		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+		       ofs, je32_to_cpu(rd->node_crc), crc);
+		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
+		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
+		return 0;
+	}
+
+	pseudo_random += je32_to_cpu(rd->version);
+
+	fd = jffs2_alloc_full_dirent(rd->nsize+1);
+	if (!fd) {
+		return -ENOMEM;
+	}
+	memcpy(&fd->name, rd->name, rd->nsize);
+	fd->name[rd->nsize] = 0;
+
+	crc = crc32(0, fd->name, rd->nsize);
+	if (crc != je32_to_cpu(rd->name_crc)) {
+		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+		       ofs, je32_to_cpu(rd->name_crc), crc);	
+		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
+		jffs2_free_full_dirent(fd);
+		/* FIXME: Why do we believe totlen? */
+		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
+		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
+		return 0;
+	}
+	raw = jffs2_alloc_raw_node_ref();
+	if (!raw) {
+		jffs2_free_full_dirent(fd);
+		printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
+		return -ENOMEM;
+	}
+	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
+	if (!ic) {
+		jffs2_free_full_dirent(fd);
+		jffs2_free_raw_node_ref(raw);
+		return -ENOMEM;
+	}
+	
+	raw->__totlen = PAD(je32_to_cpu(rd->totlen));
+	raw->flash_offset = ofs | REF_PRISTINE;
+	raw->next_phys = NULL;
+	raw->next_in_ino = ic->nodes;
+	ic->nodes = raw;
+	if (!jeb->first_node)
+		jeb->first_node = raw;
+	if (jeb->last_node)
+		jeb->last_node->next_phys = raw;
+	jeb->last_node = raw;
+
+	fd->raw = raw;
+	fd->next = NULL;
+	fd->version = je32_to_cpu(rd->version);
+	fd->ino = je32_to_cpu(rd->ino);
+	fd->nhash = full_name_hash(fd->name, rd->nsize);
+	fd->type = rd->type;
+	USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
+	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
+
+	return 0;
+}
+
+static int count_list(struct list_head *l)
+{
+	uint32_t count = 0;
+	struct list_head *tmp;
+
+	list_for_each(tmp, l) {
+		count++;
+	}
+	return count;
+}
+
+/* Note: This breaks if list_empty(head). I don't care. You
+   might, if you copy this code and use it elsewhere :) */
+static void rotate_list(struct list_head *head, uint32_t count)
+{
+	struct list_head *n = head->next;
+
+	list_del(head);
+	while(count--) {
+		n = n->next;
+	}
+	list_add(head, n);
+}
+
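rotate_list() above detaches the list head, walks 'count' elements past the old front and splices the head back in at that point; as its comment warns, it must not be called on an empty list, which is why jffs2_rotate_lists() below checks each count first. A standalone sketch of the same operation on a minimal circular doubly-linked list (type and function names invented for illustration):

/* Minimal circular doubly-linked node, standing in for struct list_head. */
struct list_node {
	struct list_node *prev, *next;
};

/* Detach 'head' from the ring, advance 'count' elements past the old
 * front, and splice 'head' back in at that point.  Like rotate_list(),
 * this assumes the list is not empty. */
static void rotate_sketch(struct list_node *head, unsigned int count)
{
	struct list_node *n = head->next;

	/* unlink the head */
	head->prev->next = head->next;
	head->next->prev = head->prev;

	while (count--)
		n = n->next;

	/* re-insert the head after n */
	head->next = n->next;
	head->prev = n;
	n->next->prev = head;
	n->next = head;
}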
+void jffs2_rotate_lists(struct jffs2_sb_info *c)
+{
+	uint32_t x;
+	uint32_t rotateby;
+
+	x = count_list(&c->clean_list);
+	if (x) {
+		rotateby = pseudo_random % x;
+		D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby));
+
+		rotate_list((&c->clean_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n",
+			  list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty clean_list\n"));
+	}
+
+	x = count_list(&c->very_dirty_list);
+	if (x) {
+		rotateby = pseudo_random % x;
+		D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby));
+
+		rotate_list((&c->very_dirty_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n",
+			  list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n"));
+	}
+
+	x = count_list(&c->dirty_list);
+	if (x) {
+		rotateby = pseudo_random % x;
+		D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby));
+
+		rotate_list((&c->dirty_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n",
+			  list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n"));
+	}
+
+	x = count_list(&c->erasable_list);
+	if (x) {
+		rotateby = pseudo_random % x;
+		D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby));
+
+		rotate_list((&c->erasable_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n",
+			  list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n"));
+	}
+
+	if (c->nr_erasing_blocks) {
+		rotateby = pseudo_random % c->nr_erasing_blocks;
+		D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby));
+
+		rotate_list((&c->erase_pending_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n",
+			  list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n"));
+	}
+
+	if (c->nr_free_blocks) {
+		rotateby = pseudo_random % c->nr_free_blocks;
+		D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby));
+
+		rotate_list((&c->free_list), rotateby);
+
+		D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n",
+			  list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset));
+	} else {
+		D1(printk(KERN_DEBUG "Not rotating empty free_list\n"));
+	}
+}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
new file mode 100644
index 000000000000..6b2a441d2766
--- /dev/null
+++ b/fs/jffs2/super.c
@@ -0,0 +1,365 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: super.c,v 1.104 2004/11/23 15:37:31 gleixner Exp $
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/jffs2.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/ctype.h>
+#include <linux/namei.h>
+#include "compr.h"
+#include "nodelist.h"
+
+static void jffs2_put_super(struct super_block *);
+
+static kmem_cache_t *jffs2_inode_cachep;
+
+static struct inode *jffs2_alloc_inode(struct super_block *sb)
+{
+	struct jffs2_inode_info *ei;
+	ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+static void jffs2_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+}
+
+static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+	struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		init_MUTEX_LOCKED(&ei->sem);
+		inode_init_once(&ei->vfs_inode);
+	}
+}
+
+static int jffs2_sync_fs(struct super_block *sb, int wait)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	down(&c->alloc_sem);
+	jffs2_flush_wbuf_pad(c);
+	up(&c->alloc_sem);	
+	return 0;
+}
+
+static struct super_operations jffs2_super_operations =
+{
+	.alloc_inode =	jffs2_alloc_inode,
+	.destroy_inode =jffs2_destroy_inode,
+	.read_inode =	jffs2_read_inode,
+	.put_super =	jffs2_put_super,
+	.write_super =	jffs2_write_super,
+	.statfs =	jffs2_statfs,
+	.remount_fs =	jffs2_remount_fs,
+	.clear_inode =	jffs2_clear_inode,
+	.dirty_inode =	jffs2_dirty_inode,
+	.sync_fs =	jffs2_sync_fs,
+};
+
+static int jffs2_sb_compare(struct super_block *sb, void *data)
+{
+	struct jffs2_sb_info *p = data;
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	/* The superblocks are considered to be equivalent if the underlying MTD
+	   device is the same one */
+	if (c->mtd == p->mtd) {
+		D1(printk(KERN_DEBUG "jffs2_sb_compare: match on device %d (\"%s\")\n", p->mtd->index, p->mtd->name));
+		return 1;
+	} else {
+		D1(printk(KERN_DEBUG "jffs2_sb_compare: No match, device %d (\"%s\"), device %d (\"%s\")\n",
+			  c->mtd->index, c->mtd->name, p->mtd->index, p->mtd->name));
+		return 0;
+	}
+}
+
+static int jffs2_sb_set(struct super_block *sb, void *data)
+{
+	struct jffs2_sb_info *p = data;
+
+	/* For persistence of NFS exports etc. we use the same s_dev
+	   each time we mount the device, don't just use an anonymous
+	   device */
+	sb->s_fs_info = p;
+	p->os_priv = sb;
+	sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, p->mtd->index);
+
+	return 0;
+}
+
+static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
+					      int flags, const char *dev_name, 
+					      void *data, struct mtd_info *mtd)
+{
+	struct super_block *sb;
+	struct jffs2_sb_info *c;
+	int ret;
+
+	c = kmalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+	memset(c, 0, sizeof(*c));
+	c->mtd = mtd;
+
+	sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
+
+	if (IS_ERR(sb))
+		goto out_put;
+
+	if (sb->s_root) {
+		/* New mountpoint for JFFS2 which is already mounted */
+		D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n",
+			  mtd->index, mtd->name));
+		goto out_put;
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): New superblock for device %d (\"%s\")\n",
+		  mtd->index, mtd->name));
+
+	sb->s_op = &jffs2_super_operations;
+	sb->s_flags = flags | MS_NOATIME;
+
+	ret = jffs2_do_fill_super(sb, data, (flags&MS_VERBOSE)?1:0);
+
+	if (ret) {
+		/* Failure case... */
+		up_write(&sb->s_umount);
+		deactivate_super(sb);
+		return ERR_PTR(ret);
+	}
+
+	sb->s_flags |= MS_ACTIVE;
+	return sb;
+
+ out_put:
+	kfree(c);
+	put_mtd_device(mtd);
+
+	return sb;
+}
+
+static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
+					      int flags, const char *dev_name, 
+					      void *data, int mtdnr)
+{
+	struct mtd_info *mtd;
+
+	mtd = get_mtd_device(NULL, mtdnr);
+	if (!mtd) {
+		D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr));
+		return ERR_PTR(-EINVAL);
+	}
+
+	return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+}
+
+static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
+					int flags, const char *dev_name,
+					void *data)
+{
+	int err;
+	struct nameidata nd;
+	int mtdnr;
+
+	if (!dev_name)
+		return ERR_PTR(-EINVAL);
+
+	D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name));
+
+	/* The preferred way of mounting in future; especially when
+	   CONFIG_BLK_DEV is implemented - we specify the underlying
+	   MTD device by number or by name, so that we don't require 
+	   block device support to be present in the kernel. */
+
+	/* FIXME: How to do the root fs this way? */
+
+	if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
+		/* Probably mounting without the blkdev crap */
+		if (dev_name[3] == ':') {
+			struct mtd_info *mtd;
+
+			/* Mount by MTD device name */
+			D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd:%%s, name \"%s\"\n", dev_name+4));
+			for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
+				mtd = get_mtd_device(NULL, mtdnr);
+				if (mtd) {
+					if (!strcmp(mtd->name, dev_name+4))
+						return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+					put_mtd_device(mtd);
+				}
+			}
+			printk(KERN_NOTICE "jffs2_get_sb(): MTD device with name \"%s\" not found.\n", dev_name+4);
+		} else if (isdigit(dev_name[3])) {
+			/* Mount by MTD device number */
+			char *endptr;
+			
+			mtdnr = simple_strtoul(dev_name+3, &endptr, 0);
+			if (!*endptr) {
+				/* It was a valid number */
+				D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr));
+				return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+			}
+		}
+	}
+
+	/* Try the old way - the hack where we allowed users to mount 
+	   /dev/mtdblock$(n) but didn't actually _use_ the blkdev */
+
+	err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
+
+	D1(printk(KERN_DEBUG "jffs2_get_sb(): path_lookup() returned %d, inode %p\n",
+		  err, nd.dentry->d_inode));
+
+	if (err)
+		return ERR_PTR(err);
+
+	err = -EINVAL;
+
+	if (!S_ISBLK(nd.dentry->d_inode->i_mode))
+		goto out;
+
+	if (nd.mnt->mnt_flags & MNT_NODEV) {
+		err = -EACCES;
+		goto out;
+	}
+
+	if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) {
+		if (!(flags & MS_VERBOSE)) /* Yes I mean this. Strangely */
+			printk(KERN_NOTICE "Attempt to mount non-MTD device \"%s\" as JFFS2\n",
+			       dev_name);
+		goto out;
+	}
+
+	mtdnr = iminor(nd.dentry->d_inode);
+	path_release(&nd);
+
+	return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+
+out:
+	path_release(&nd);
+	return ERR_PTR(err);
+}
+
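The branches above accept three spellings of the device: "mtd:<name>", "mtd<number>", or a path to an mtdblock device node. The userspace sketch below reproduces just that classification step; the enum, the function name and the main() driver are invented for illustration and are not part of the kernel code.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* "mtd:<name>"  -> look the MTD device up by name,
 * "mtd<N>"      -> use MTD device number N,
 * anything else -> fall back to treating it as a block device path. */
enum mount_spec { SPEC_MTD_NAME, SPEC_MTD_NUMBER, SPEC_BLOCKDEV };

static enum mount_spec classify_dev_name(const char *dev_name, int *mtdnr)
{
	if (strncmp(dev_name, "mtd", 3) == 0) {
		if (dev_name[3] == ':')
			return SPEC_MTD_NAME;		/* name follows the colon */
		if (isdigit((unsigned char)dev_name[3])) {
			char *end;
			long nr = strtol(dev_name + 3, &end, 0);
			if (*end == '\0') {		/* whole suffix was a number */
				*mtdnr = (int)nr;
				return SPEC_MTD_NUMBER;
			}
		}
	}
	return SPEC_BLOCKDEV;
}

int main(void)
{
	int nr = -1;

	printf("%d\n", classify_dev_name("mtd2", &nr));		/* SPEC_MTD_NUMBER, nr == 2 */
	printf("%d\n", classify_dev_name("mtd:flash0", &nr));		/* SPEC_MTD_NAME */
	printf("%d\n", classify_dev_name("/dev/mtdblock3", &nr));	/* SPEC_BLOCKDEV */
	return 0;
}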
+static void jffs2_put_super (struct super_block *sb)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
+
+	if (!(sb->s_flags & MS_RDONLY))
+		jffs2_stop_garbage_collect_thread(c);
+	down(&c->alloc_sem);
+	jffs2_flush_wbuf_pad(c);
+	up(&c->alloc_sem);
+	jffs2_free_ino_caches(c);
+	jffs2_free_raw_node_refs(c);
+	if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+		vfree(c->blocks);
+	else
+		kfree(c->blocks);
+	jffs2_flash_cleanup(c);
+	kfree(c->inocache_list);
+	if (c->mtd->sync)
+		c->mtd->sync(c->mtd);
+
+	D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
+}
+
+static void jffs2_kill_sb(struct super_block *sb)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+	generic_shutdown_super(sb);
+	put_mtd_device(c->mtd);
+	kfree(c);
+}
+
+static struct file_system_type jffs2_fs_type = {
+	.owner =	THIS_MODULE,
+	.name =		"jffs2",
+	.get_sb =	jffs2_get_sb,
+	.kill_sb =	jffs2_kill_sb,
+};
+
+static int __init init_jffs2_fs(void)
+{
+	int ret;
+
+	printk(KERN_INFO "JFFS2 version 2.2."
+#ifdef CONFIG_JFFS2_FS_NAND
+	       " (NAND)"
+#endif
+	       " (C) 2001-2003 Red Hat, Inc.\n");
+
+	jffs2_inode_cachep = kmem_cache_create("jffs2_i",
+					     sizeof(struct jffs2_inode_info),
+					     0, SLAB_RECLAIM_ACCOUNT,
+					     jffs2_i_init_once, NULL);
+	if (!jffs2_inode_cachep) {
+		printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
+		return -ENOMEM;
+	}
+	ret = jffs2_compressors_init();
+	if (ret) {
+		printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
+		goto out;
+	}
+	ret = jffs2_create_slab_caches();
+	if (ret) {
+		printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n");
+		goto out_compressors;
+	}
+	ret = register_filesystem(&jffs2_fs_type);
+	if (ret) {
+		printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n");
+		goto out_slab;
+	}
+	return 0;
+
+ out_slab:
+	jffs2_destroy_slab_caches();
+ out_compressors:
+	jffs2_compressors_exit();
+ out:
+	kmem_cache_destroy(jffs2_inode_cachep);
+	return ret;
+}
+
+static void __exit exit_jffs2_fs(void)
+{
+	unregister_filesystem(&jffs2_fs_type);
+	jffs2_destroy_slab_caches();
+	jffs2_compressors_exit();
+	kmem_cache_destroy(jffs2_inode_cachep);
+}
+
+module_init(init_jffs2_fs);
+module_exit(exit_jffs2_fs);
+
+MODULE_DESCRIPTION("The Journalling Flash File System, v2");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for 
+		       // the sake of this tag. It's Free Software.
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
new file mode 100644
index 000000000000..7b1820d13712
--- /dev/null
+++ b/fs/jffs2/symlink.c
@@ -0,0 +1,45 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: symlink.c,v 1.14 2004/11/16 20:36:12 dwmw2 Exp $
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include "nodelist.h"
+
+static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
+static void jffs2_put_link(struct dentry *dentry, struct nameidata *nd);
+
+struct inode_operations jffs2_symlink_inode_operations =
+{	
+	.readlink =	generic_readlink,
+	.follow_link =	jffs2_follow_link,
+	.put_link =	jffs2_put_link,
+	.setattr =	jffs2_setattr
+};
+
+static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	unsigned char *buf;
+	buf = jffs2_getlink(JFFS2_SB_INFO(dentry->d_inode->i_sb), JFFS2_INODE_INFO(dentry->d_inode));
+	nd_set_link(nd, buf);
+	return 0;
+}
+
+static void jffs2_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+	char *s = nd_get_link(nd);
+	if (!IS_ERR(s))
+		kfree(s);
+}
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
new file mode 100644
index 000000000000..c8128069ecf0
--- /dev/null
+++ b/fs/jffs2/wbuf.c
@@ -0,0 +1,1184 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: wbuf.c,v 1.82 2004/11/20 22:08:31 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/crc32.h>
+#include <linux/mtd/nand.h>
+#include "nodelist.h"
+
+/* For testing write failures */
+#undef BREAKME
+#undef BREAKMEHEADER
+
+#ifdef BREAKME
+static unsigned char *brokenbuf;
+#endif
+
+/* max. erase failures before we mark a block bad */
+#define MAX_ERASE_FAILURES 	2
+
+/* two seconds timeout for timed wbuf-flushing */
+#define WBUF_FLUSH_TIMEOUT	(2 * HZ)
+
+struct jffs2_inodirty {
+	uint32_t ino;
+	struct jffs2_inodirty *next;
+};
+
+static struct jffs2_inodirty inodirty_nomem;
+
+static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
+{
+	struct jffs2_inodirty *this = c->wbuf_inodes;
+
+	/* If a malloc failed, consider _everything_ dirty */
+	if (this == &inodirty_nomem)
+		return 1;
+
+	/* If ino == 0, _any_ non-GC writes mean 'yes' */
+	if (this && !ino)
+		return 1;
+
+	/* Look to see if the inode in question is pending in the wbuf */
+	while (this) {
+		if (this->ino == ino)
+			return 1;
+		this = this->next;
+	}
+	return 0;
+}
+
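The write-buffer bookkeeping above tracks which inodes have data pending in the wbuf as a simple singly-linked list, with the address of inodirty_nomem doubling as a sentinel for "allocation failed, consider everything dirty". A compact userspace sketch of the same pattern (all names invented):

#include <stdlib.h>

/* Userspace analogue of struct jffs2_inodirty. */
struct dirty_ino {
	unsigned int ino;
	struct dirty_ino *next;
};

/* Sentinel meaning "allocation failed once; treat everything as dirty". */
static struct dirty_ino dirty_nomem;

/* Nonzero if 'ino' is pending -- or, for ino == 0, if anything at all is. */
static int ino_pending(struct dirty_ino *list, unsigned int ino)
{
	if (list == &dirty_nomem)
		return 1;
	if (list && !ino)
		return 1;
	for (; list; list = list->next)
		if (list->ino == ino)
			return 1;
	return 0;
}

/* Record 'ino' as dirtied; on allocation failure drop the precise list
 * and fall back to the sentinel, mirroring jffs2_wbuf_dirties_inode(). */
static struct dirty_ino *ino_mark_dirty(struct dirty_ino *list, unsigned int ino)
{
	struct dirty_ino *entry;

	if (ino_pending(list, ino))
		return list;

	entry = malloc(sizeof(*entry));
	if (!entry) {
		while (list) {
			struct dirty_ino *next = list->next;
			free(list);
			list = next;
		}
		return &dirty_nomem;
	}
	entry->ino = ino;
	entry->next = list;
	return entry;
}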
+static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
+{
+	struct jffs2_inodirty *this;
+
+	this = c->wbuf_inodes;
+
+	if (this != &inodirty_nomem) {
+		while (this) {
+			struct jffs2_inodirty *next = this->next;
+			kfree(this);
+			this = next;
+		}
+	}
+	c->wbuf_inodes = NULL;
+}
+
+static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
+{
+	struct jffs2_inodirty *new;
+
+	/* Mark the superblock dirty so that kupdated will flush... */
+	OFNI_BS_2SFFJ(c)->s_dirt = 1;
+
+	if (jffs2_wbuf_pending_for_ino(c, ino))
+		return;
+
+	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	if (!new) {
+		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
+		jffs2_clear_wbuf_ino_list(c);
+		c->wbuf_inodes = &inodirty_nomem;
+		return;
+	}
+	new->ino = ino;
+	new->next = c->wbuf_inodes;
+	c->wbuf_inodes = new;
+	return;
+}
+
+static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
+{
+	struct list_head *this, *next;
+	static int n;
+
+	if (list_empty(&c->erasable_pending_wbuf_list))
+		return;
+
+	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
+		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
+		list_del(this);
+		if ((jiffies + (n++)) & 127) {
+			/* Most of the time, we just erase it immediately. Otherwise we
+			   spend ages scanning it on mount, etc. */
+			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+			list_add_tail(&jeb->list, &c->erase_pending_list);
+			c->nr_erasing_blocks++;
+			jffs2_erase_pending_trigger(c);
+		} else {
+			/* Sometimes, however, we leave it elsewhere so it doesn't get
+			   immediately reused, and we spread the load a bit. */
+			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+			list_add_tail(&jeb->list, &c->erasable_list);
+		}
+	}
+}
+
+static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	D1(printk("About to refile bad block at %08x\n", jeb->offset));
+
+	D2(jffs2_dump_block_lists(c));
+	/* File the existing block on the bad_used_list.... */
+	if (c->nextblock == jeb)
+		c->nextblock = NULL;
+	else /* Not sure this should ever happen... need more coffee */
+		list_del(&jeb->list);
+	if (jeb->first_node) {
+		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
+		list_add(&jeb->list, &c->bad_used_list);
+	} else {
+		BUG();
+		/* It has to have had some nodes or we couldn't be here */
+		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
+		list_add(&jeb->list, &c->erase_pending_list);
+		c->nr_erasing_blocks++;
+		jffs2_erase_pending_trigger(c);
+	}
+	D2(jffs2_dump_block_lists(c));
+
+	/* Adjust its size counts accordingly */
+	c->wasted_size += jeb->free_size;
+	c->free_size -= jeb->free_size;
+	jeb->wasted_size += jeb->free_size;
+	jeb->free_size = 0;
+
+	ACCT_SANITY_CHECK(c,jeb);
+	D1(ACCT_PARANOIA_CHECK(jeb));
+}
+
+/* Recover from failure to write wbuf. Recover the nodes up to the
+ * wbuf, not the one which we were starting to try to write. */
+
+static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
+{
+	struct jffs2_eraseblock *jeb, *new_jeb;
+	struct jffs2_raw_node_ref **first_raw, **raw;
+	size_t retlen;
+	int ret;
+	unsigned char *buf;
+	uint32_t start, end, ofs, len;
+
+	spin_lock(&c->erase_completion_lock);
+
+	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+
+	jffs2_block_refile(c, jeb);
+
+	/* Find the first node to be recovered, by skipping over every
+	   node which ends before the wbuf starts, or which is obsolete. */
+	first_raw = &jeb->first_node;
+	while (*first_raw && 
+	       (ref_obsolete(*first_raw) ||
+		(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
+		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
+			  ref_offset(*first_raw), ref_flags(*first_raw),
+			  (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
+			  c->wbuf_ofs));
+		first_raw = &(*first_raw)->next_phys;
+	}
+
+	if (!*first_raw) {
+		/* All nodes were obsolete. Nothing to recover. */
+		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
+		spin_unlock(&c->erase_completion_lock);
+		return;
+	}
+
+	start = ref_offset(*first_raw);
+	end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
+
+	/* Find the last node to be recovered */
+	raw = first_raw;
+	while ((*raw)) {
+		if (!ref_obsolete(*raw))
+			end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
+
+		raw = &(*raw)->next_phys;
+	}
+	spin_unlock(&c->erase_completion_lock);
+
+	D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
+
+	buf = NULL;
+	if (start < c->wbuf_ofs) {
+		/* First affected node was already partially written.
+		 * Attempt to reread the old data into our buffer. */
+
+		buf = kmalloc(end - start, GFP_KERNEL);
+		if (!buf) {
+			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
+
+			goto read_failed;
+		}
+
+		/* Do the read... */
+		if (jffs2_cleanmarker_oob(c))
+			ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
+		else
+			ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
+		
+		if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
+			/* ECC recovered */
+			ret = 0;
+		}
+		if (ret || retlen != c->wbuf_ofs - start) {
+			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
+
+			kfree(buf);
+			buf = NULL;
+		read_failed:
+			first_raw = &(*first_raw)->next_phys;
+			/* If this was the only node to be recovered, give up */
+			if (!(*first_raw))
+				return;
+
+			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
+			start = ref_offset(*first_raw);
+		} else {
+			/* Read succeeded. Copy the remaining data from the wbuf */
+			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
+		}
+	}
+	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
+	   Either 'buf' contains the data, or we find it in the wbuf */
+
+
+	/* ... and get an allocation of space from a shiny new block instead */
+	ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len);
+	if (ret) {
+		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
+		if (buf)
+			kfree(buf);
+		return;
+	}
+	if (end-start >= c->wbuf_pagesize) {
+		/* Need to do another write immediately. This, btw,
+		 means that we'll be writing from 'buf' and not from
+		 the wbuf. Since if we're writing from the wbuf there
+		 won't be more than a wbuf full of data, now will
+		 there? :) */
+
+		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
+
+		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
+			  towrite, ofs));
+	  
+#ifdef BREAKMEHEADER
+		static int breakme;
+		if (breakme++ == 20) {
+			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
+			breakme = 0;
+			c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
+					  brokenbuf, NULL, c->oobinfo);
+			ret = -EIO;
+		} else
+#endif
+		if (jffs2_cleanmarker_oob(c))
+			ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
+						buf, NULL, c->oobinfo);
+		else
+			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, buf);
+
+		if (ret || retlen != towrite) {
+			/* Argh. We tried. Really we did. */
+			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
+			kfree(buf);
+
+			if (retlen) {
+				struct jffs2_raw_node_ref *raw2;
+
+				raw2 = jffs2_alloc_raw_node_ref();
+				if (!raw2)
+					return;
+
+				raw2->flash_offset = ofs | REF_OBSOLETE;
+				raw2->__totlen = ref_totlen(c, jeb, *first_raw);
+				raw2->next_phys = NULL;
+				raw2->next_in_ino = NULL;
+
+				jffs2_add_physical_node_ref(c, raw2);
+			}
+			return;
+		}
+		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
+
+		c->wbuf_len = (end - start) - towrite;
+		c->wbuf_ofs = ofs + towrite;
+		memcpy(c->wbuf, buf + towrite, c->wbuf_len);
+		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
+
+		kfree(buf);
+	} else {
+		/* OK, now we're left with the dregs in whichever buffer we're using */
+		if (buf) {
+			memcpy(c->wbuf, buf, end-start);
+			kfree(buf);
+		} else {
+			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
+		}
+		c->wbuf_ofs = ofs;
+		c->wbuf_len = end - start;
+	}
+
+	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
+	new_jeb = &c->blocks[ofs / c->sector_size];
+
+	spin_lock(&c->erase_completion_lock);
+	if (new_jeb->first_node) {
+		/* Odd, but possible with ST flash later maybe */
+		new_jeb->last_node->next_phys = *first_raw;
+	} else {
+		new_jeb->first_node = *first_raw;
+	}
+
+	raw = first_raw;
+	while (*raw) {
+		uint32_t rawlen = ref_totlen(c, jeb, *raw);
+
+		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
+			  rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
+
+		if (ref_obsolete(*raw)) {
+			/* Shouldn't really happen much */
+			new_jeb->dirty_size += rawlen;
+			new_jeb->free_size -= rawlen;
+			c->dirty_size += rawlen;
+		} else {
+			new_jeb->used_size += rawlen;
+			new_jeb->free_size -= rawlen;
+			jeb->dirty_size += rawlen;
+			jeb->used_size  -= rawlen;
+			c->dirty_size += rawlen;
+		}
+		c->free_size -= rawlen;
+		(*raw)->flash_offset = ofs | ref_flags(*raw);
+		ofs += rawlen;
+		new_jeb->last_node = *raw;
+
+		raw = &(*raw)->next_phys;
+	}
+
+	/* Fix up the original jeb now it's on the bad_list */
+	*first_raw = NULL;
+	if (first_raw == &jeb->first_node) {
+		jeb->last_node = NULL;
+		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
+		list_del(&jeb->list);
+		list_add(&jeb->list, &c->erase_pending_list);
+		c->nr_erasing_blocks++;
+		jffs2_erase_pending_trigger(c);
+	}
+	else
+		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
+
+	ACCT_SANITY_CHECK(c,jeb);
+        D1(ACCT_PARANOIA_CHECK(jeb));
+
+	ACCT_SANITY_CHECK(c,new_jeb);
+        D1(ACCT_PARANOIA_CHECK(new_jeb));
+
+	spin_unlock(&c->erase_completion_lock);
+
+	D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
+}
+
+/* Meaning of pad argument:
+   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
+   1: Pad, do not adjust nextblock free_size
+   2: Pad, adjust nextblock free_size
+*/
+#define NOPAD		0
+#define PAD_NOACCOUNT	1
+#define PAD_ACCOUNTING	2
+
+static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
+{
+	int ret;
+	size_t retlen;
+
+	/* Nothing to do if not NAND flash. In particular, we shouldn't
+	   del_timer() the timer we never initialised. */
+	if (jffs2_can_mark_obsolete(c))
+		return 0;
+
+	if (!down_trylock(&c->alloc_sem)) {
+		up(&c->alloc_sem);
+		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
+		BUG();
+	}
+
+	if(!c->wbuf || !c->wbuf_len)
+		return 0;
+
+	/* Claim the remaining space on the page. This happens if we are
+	   moving to a new block, or if fsync forces us to flush the write
+	   buffer. If we have switched to the next page, we will not have
+	   enough remaining space for this.
+	*/
+	if (pad) {
+		c->wbuf_len = PAD(c->wbuf_len);
+
+		/* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
+		   with 8 byte page size */
+		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
+		
+		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
+			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
+			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
+			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
+			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
+		}
+	}
+	/* else jffs2_flash_writev has actually filled in the rest of the
+	   buffer for us, and will deal with the node refs etc. later. */
+	
+#ifdef BREAKME
+	static int breakme;
+	if (breakme++ == 20) {
+		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
+		breakme = 0;
+		c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
+					&retlen, brokenbuf, NULL, c->oobinfo);
+		ret = -EIO;
+	} else 
+#endif
+	
+	if (jffs2_cleanmarker_oob(c))
+		ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
+	else
+		ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
+
+	if (ret || retlen != c->wbuf_pagesize) {
+		if (ret)
+			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
+		else {
+			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
+				retlen, c->wbuf_pagesize);
+			ret = -EIO;
+		}
+
+		jffs2_wbuf_recover(c);
+
+		return ret;
+	}
+
+	spin_lock(&c->erase_completion_lock);
+
+	/* Adjust free size of the block if we padded. */
+	if (pad) {
+		struct jffs2_eraseblock *jeb;
+
+		jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+
+		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
+			  (jeb==c->nextblock)?"next":"", jeb->offset));
+
+		/* wbuf_pagesize - wbuf_len is the amount of space that's to be 
+		   padded. If there is less free space in the block than that,
+		   something screwed up */
+		if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
+			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
+			       c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len);
+			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
+			       jeb->offset, jeb->free_size);
+			BUG();
+		}
+		jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len);
+		c->free_size -= (c->wbuf_pagesize - c->wbuf_len);
+		jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
+		c->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
+	}
+
+	/* Stick any now-obsoleted blocks on the erase_pending_list */
+	jffs2_refile_wbuf_blocks(c);
+	jffs2_clear_wbuf_ino_list(c);
+	spin_unlock(&c->erase_completion_lock);
+
+	memset(c->wbuf,0xff,c->wbuf_pagesize);
+	/* adjust write buffer offset, else we get a non-contiguous write bug */
+	c->wbuf_ofs += c->wbuf_pagesize;
+	c->wbuf_len = 0;
+	return 0;
+}
+
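When padding, the code above zero-fills the rest of the page and, if at least a node header still fits, stamps a JFFS2_NODETYPE_PADDING pseudo-node whose totlen covers the padded tail, so a later scan skips the dead space as one node. A hedged userspace sketch of that construction follows; the header layout, the 0x1985/0x2004 constants and the caller-supplied CRC callback are assumptions of the sketch, and endianness conversion is omitted.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Assumed on-media header layout for this sketch (cf. jffs2_unknown_node). */
struct padding_hdr_sketch {
	uint16_t magic;		/* assumed JFFS2_MAGIC_BITMASK, 0x1985 */
	uint16_t nodetype;	/* assumed JFFS2_NODETYPE_PADDING, 0x2004 */
	uint32_t totlen;	/* covers the whole padded tail of the page */
	uint32_t hdr_crc;	/* CRC over the first three fields */
};

/* Zero-fill wbuf[used..pagesize) and, if a header fits, write a padding
 * node at 'used' so a later scan can step over the dead space.  The CRC
 * is supplied by the caller as a stand-in for the kernel's crc32(). */
static void pad_wbuf_sketch(uint8_t *wbuf, size_t used, size_t pagesize,
			    uint32_t (*crc32_like)(uint32_t, const void *, size_t))
{
	memset(wbuf + used, 0, pagesize - used);

	if (used + sizeof(struct padding_hdr_sketch) < pagesize) {
		struct padding_hdr_sketch *pad = (void *)(wbuf + used);

		pad->magic    = 0x1985;
		pad->nodetype = 0x2004;
		pad->totlen   = (uint32_t)(pagesize - used);
		pad->hdr_crc  = crc32_like(0, pad, sizeof(*pad) - 4);
	}
}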
+/* Trigger garbage collection to flush the write-buffer. 
+   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
+   outstanding. If ino arg non-zero, do it only if a write for the 
+   given inode is outstanding. */
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
+{
+	uint32_t old_wbuf_ofs;
+	uint32_t old_wbuf_len;
+	int ret = 0;
+
+	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
+
+	down(&c->alloc_sem);
+	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
+		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
+		up(&c->alloc_sem);
+		return 0;
+	}
+
+	old_wbuf_ofs = c->wbuf_ofs;
+	old_wbuf_len = c->wbuf_len;
+
+	if (c->unchecked_size) {
+		/* GC won't make any progress for a while */
+		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
+		down_write(&c->wbuf_sem);
+		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
+		up_write(&c->wbuf_sem);
+	} else while (old_wbuf_len &&
+		      old_wbuf_ofs == c->wbuf_ofs) {
+
+		up(&c->alloc_sem);
+
+		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
+
+		ret = jffs2_garbage_collect_pass(c);
+		if (ret) {
+			/* GC failed. Flush it with padding instead */
+			down(&c->alloc_sem);
+			down_write(&c->wbuf_sem);
+			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
+			up_write(&c->wbuf_sem);
+			break;
+		}
+		down(&c->alloc_sem);
+	}
+
+	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
+
+	up(&c->alloc_sem);
+	return ret;
+}
+
+/* Pad write-buffer to end and write it, wasting space. */
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
+{
+	int ret;
+
+	down_write(&c->wbuf_sem);
+	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
+	up_write(&c->wbuf_sem);
+
+	return ret;
+}
+
+#define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) )
+#define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) )
+int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
+{
+	struct kvec outvecs[3];
+	uint32_t totlen = 0;
+	uint32_t split_ofs = 0;
+	uint32_t old_totlen;
+	int ret, splitvec = -1;
+	int invec, outvec;
+	size_t wbuf_retlen;
+	unsigned char *wbuf_ptr;
+	size_t donelen = 0;
+	uint32_t outvec_to = to;
+
+	/* If not NAND flash, don't bother */
+	if (!c->wbuf)
+		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
+	
+	down_write(&c->wbuf_sem);
+
+	/* If wbuf_ofs is not initialized, set it to target address */
+	if (c->wbuf_ofs == 0xFFFFFFFF) {
+		c->wbuf_ofs = PAGE_DIV(to);
+		c->wbuf_len = PAGE_MOD(to);			
+		memset(c->wbuf,0xff,c->wbuf_pagesize);
+	}
+
+	/* Fixup the wbuf if we are moving to a new eraseblock.  The checks below
+	   fail for ECC'd NOR because cleanmarker == 16, so a block starts at
+	   xxx0010.  */
+	if (jffs2_nor_ecc(c)) {
+		if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
+			c->wbuf_ofs = PAGE_DIV(to);
+			c->wbuf_len = PAGE_MOD(to);
+			memset(c->wbuf,0xff,c->wbuf_pagesize);
+		}
+	}
+	
+	/* Sanity checks on target address. 
+	   It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), 
+	   and it's permitted to write at the beginning of a new 
+	   erase block. Anything else, and you die.
+	   New block starts at xxx000c (0-b = block header)
+	*/
+	if ( (to & ~(c->sector_size-1)) != (c->wbuf_ofs & ~(c->sector_size-1)) ) {
+		/* It's a write to a new block */
+		if (c->wbuf_len) {
+			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
+			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
+			if (ret) {
+				/* the underlying layer has to check wbuf_len to do the cleanup */
+				D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
+				*retlen = 0;
+				goto exit;
+			}
+		}
+		/* set pointer to new block */
+		c->wbuf_ofs = PAGE_DIV(to);
+		c->wbuf_len = PAGE_MOD(to);			
+	} 
+
+	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
+		/* We're not writing immediately after the writebuffer. Bad. */
+		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
+		if (c->wbuf_len)
+			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
+					  c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
+		BUG();
+	}
+
+	/* Note outvecs[3] above. We know count is never greater than 2 */
+	if (count > 2) {
+		printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
+		BUG();
+	}
+
+	invec = 0;
+	outvec = 0;
+
+	/* Fill writebuffer first, if already in use */	
+	if (c->wbuf_len) {
+		uint32_t invec_ofs = 0;
+
+		/* adjust alignment offset */ 
+		if (c->wbuf_len != PAGE_MOD(to)) {
+			c->wbuf_len = PAGE_MOD(to);
+			/* take care of alignment to next page */
+			if (!c->wbuf_len)
+				c->wbuf_len = c->wbuf_pagesize;
+		}
+		
+		while(c->wbuf_len < c->wbuf_pagesize) {
+			uint32_t thislen;
+			
+			if (invec == count)
+				goto alldone;
+
+			thislen = c->wbuf_pagesize - c->wbuf_len;
+
+			if (thislen >= invecs[invec].iov_len)
+				thislen = invecs[invec].iov_len;
+	
+			invec_ofs = thislen;
+
+			memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
+			c->wbuf_len += thislen;
+			donelen += thislen;
+			/* Get the next invec if the current one did not fill the buffer */
+			if (c->wbuf_len < c->wbuf_pagesize) 
+				invec++;
+		}			
+		
+		/* write buffer is full, flush buffer */
+		ret = __jffs2_flush_wbuf(c, NOPAD);
+		if (ret) {
+			/* the underlying layer has to check wbuf_len to do the cleanup */
+			D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
+			/* Retlen zero to make sure our caller doesn't mark the space dirty.
+			   We've already done everything that's necessary */
+			*retlen = 0;
+			goto exit;
+		}
+		outvec_to += donelen;
+		c->wbuf_ofs = outvec_to;
+
+		/* All invecs done ? */
+		if (invec == count)
+			goto alldone;
+
+		/* Set up the first outvec, containing the remainder of the
+		   invec we partially used */
+		if (invecs[invec].iov_len > invec_ofs) {
+			outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
+			totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
+			if (totlen > c->wbuf_pagesize) {
+				splitvec = outvec;
+				split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
+			}
+			outvec++;
+		}
+		invec++;
+	}
+
+	/* OK, now we've flushed the wbuf and the start of the bits
+	   we have been asked to write, now to write the rest.... */
+
+	/* totlen holds the amount of data still to be written */
+	old_totlen = totlen;
+	for ( ; invec < count; invec++,outvec++ ) {
+		outvecs[outvec].iov_base = invecs[invec].iov_base;
+		totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
+		if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
+			splitvec = outvec;
+			split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
+			old_totlen = totlen;
+		}
+	}
+
+	/* Now the outvecs array holds all the remaining data to write */
+	/* Up to splitvec,split_ofs is to be written immediately. The rest
+	   goes into the (now-empty) wbuf */
+
+	if (splitvec != -1) {
+		uint32_t remainder;
+
+		remainder = outvecs[splitvec].iov_len - split_ofs;
+		outvecs[splitvec].iov_len = split_ofs;
+
+		/* We did cross a page boundary, so we write some now */
+		if (jffs2_cleanmarker_oob(c))
+			ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); 
+		else
+			ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
+		
+		if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
+			/* At this point we have no problem,
+			   c->wbuf is empty. 
+			*/
+			*retlen = donelen;
+			goto exit;
+		}
+		
+		donelen += wbuf_retlen;
+		c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
+
+		if (remainder) {
+			outvecs[splitvec].iov_base += split_ofs;
+			outvecs[splitvec].iov_len = remainder;
+		} else {
+			splitvec++;
+		}
+
+	} else {
+		splitvec = 0;
+	}
+
+	/* Now splitvec points to the start of the bits we have to copy
+	   into the wbuf */
+	wbuf_ptr = c->wbuf;
+
+	for ( ; splitvec < outvec; splitvec++) {
+		/* Don't copy the wbuf into itself */
+		if (outvecs[splitvec].iov_base == c->wbuf)
+			continue;
+		memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
+		wbuf_ptr += outvecs[splitvec].iov_len;
+		donelen += outvecs[splitvec].iov_len;
+	}
+	c->wbuf_len = wbuf_ptr - c->wbuf;
+
+	/* If there's a remainder in the wbuf and it's a non-GC write,
+	   remember that the wbuf affects this ino */
+alldone:
+	*retlen = donelen;
+
+	if (c->wbuf_len && ino)
+		jffs2_wbuf_dirties_inode(c, ino);
+
+	ret = 0;
+	
+exit:
+	up_write(&c->wbuf_sem);
+	return ret;
+}
+
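PAGE_DIV() and PAGE_MOD() rely on wbuf_pagesize being a power of two: masking splits any offset into a page-aligned base and an in-page remainder, which is how the writev path above decides how much of a run can go straight to flash and how much stays behind in the write buffer. A small numeric sketch, assuming a 512-byte page purely for illustration:

#include <assert.h>
#include <stdint.h>

/* The masks from above, specialised for an assumed 512-byte wbuf page. */
#define WBUF_PAGESIZE	512u
#define PAGE_DIV(x)	((x) & ~(WBUF_PAGESIZE - 1))
#define PAGE_MOD(x)	((x) & (WBUF_PAGESIZE - 1))

int main(void)
{
	uint32_t to = 0x1234;	/* arbitrary flash offset */
	uint32_t len = 700;	/* arbitrary write length */

	/* Any offset splits exactly into page base + in-page remainder. */
	assert(PAGE_DIV(to) == 0x1200 && PAGE_MOD(to) == 0x34);
	assert(PAGE_DIV(to) + PAGE_MOD(to) == to);

	/* The run [to, to+len) crosses a page boundary at 0x1400: the part
	 * before the final boundary can be written out in page units, the
	 * PAGE_MOD(to + len) bytes past it would stay in the write buffer. */
	uint32_t before_boundary = PAGE_DIV(to + len) - to;	/* 460 bytes */
	uint32_t past_boundary   = PAGE_MOD(to + len);		/* 240 bytes */
	assert(before_boundary + past_boundary == len);

	return 0;
}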
+/*
+ *	This is the entry for flash write.
+ *	Check whether we are working on NAND flash; if so, build a kvec and write it via writev
+*/
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
+{
+	struct kvec vecs[1];
+
+	if (jffs2_can_mark_obsolete(c))
+		return c->mtd->write(c->mtd, ofs, len, retlen, buf);
+
+	vecs[0].iov_base = (unsigned char *) buf;
+	vecs[0].iov_len = len;
+	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
+}
+
+/*
+	Handle readback from writebuffer and ECC failure return
+*/
+int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
+{
+	loff_t	orbf = 0, owbf = 0, lwbf = 0;
+	int	ret;
+
+	/* Read flash */
+	if (!jffs2_can_mark_obsolete(c)) {
+		down_read(&c->wbuf_sem);
+
+		if (jffs2_cleanmarker_oob(c))
+			ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
+		else
+			ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
+
+		if ( (ret == -EBADMSG) && (*retlen == len) ) {
+			printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
+			       len, ofs);
+			/*
+			 * We have the raw data without ECC correction in the buffer;
+			 * maybe we are lucky and all or part of the data is correct. We
+			 * check the node; if the data is corrupted, the node check will
+			 * sort it out. We keep this block: it will fail on write or
+			 * erase and then we mark it bad. Or should we do that now? But
+			 * we should give it a chance. Maybe we had a system crash or
+			 * power loss before the ECC write or an erase was completed.
+			 * So we return success. :)
+			 */
+		 	ret = 0;
+		 }	
+	} else
+		return c->mtd->read(c->mtd, ofs, len, retlen, buf);
+
+	/* if no writebuffer available or write buffer empty, return */
+	if (!c->wbuf_pagesize || !c->wbuf_len)
+		goto exit;
+
+	/* if the read is in a different eraseblock from the wbuf, return */
+	if ( (ofs & ~(c->sector_size-1)) != (c->wbuf_ofs & ~(c->sector_size-1)) ) 
+		goto exit;
+
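+	/* The read overlaps the eraseblock the wbuf belongs to; merge any
+	   bytes still held in the wbuf over the data read from flash. Either
+	   the read starts inside the wbuf window, or the wbuf window starts
+	   inside the read. */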
+	if (ofs >= c->wbuf_ofs) {
+		owbf = (ofs - c->wbuf_ofs);	/* offset into the write buffer */
+		if (owbf > c->wbuf_len)		/* does the read start beyond the write buffer? */
+			goto exit;
+		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
+		if (lwbf > len)
+			lwbf = len;
+	} else {
+		orbf = (c->wbuf_ofs - ofs);	/* offset into the read buffer */
+		if (orbf > len)			/* does the read end before the write buffer starts? */
+			goto exit;
+		lwbf = len - orbf;		/* number of bytes to copy */
+		if (lwbf > c->wbuf_len)
+			lwbf = c->wbuf_len;
+	}
+	if (lwbf > 0)
+		memcpy(buf + orbf, c->wbuf + owbf, lwbf);
+
+exit:
+	up_read(&c->wbuf_sem);
+	return ret;
+}
+
+/*
+ *	Check if the out-of-band area is empty
+ */
+int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
+{
+	unsigned char *buf;
+	int 	ret = 0;
+	int	i,len,page;
+	size_t  retlen;
+	int	oob_size;
+
+	/* allocate a buffer for the oob data of the first four pages of this block */
+	oob_size = c->mtd->oobsize;
+	len = 4 * oob_size;
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
+		return -ENOMEM;
+	}
+	/*
+	 * If mode = 0 we scan for a completely empty oob area; otherwise we
+	 * have to take care of the cleanmarker in the first page of the block.
+	 */
+	ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
+	if (ret) {
+		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
+		goto out;
+	}
+	
+	if (retlen < len) {
+		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB returned short read "
+			  "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
+		ret = -EIO;
+		goto out;
+	}
+	
+	/* Special check for first page */
+	for(i = 0; i < oob_size ; i++) {
+		/* Yeah, we know about the cleanmarker. */
+		if (mode && i >= c->fsdata_pos && 
+		    i < c->fsdata_pos + c->fsdata_len)
+			continue;
+
+		if (buf[i] != 0xFF) {
+			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
+				  buf[i], i, jeb->offset));
+			ret = 1; 
+			goto out;
+		}
+	}
+
+	/* we know, we are aligned :) */	
+	for (page = oob_size; page < len; page += sizeof(long)) {
+		unsigned long dat = *(unsigned long *)(&buf[page]);
+		if(dat != -1) {
+			ret = 1; 
+			goto out;
+		}
+	}
+
+out:
+	kfree(buf);	
+	
+	return ret;
+}
+
+/*
+ * Scan for a valid cleanmarker and for bad blocks.
+ * For virtual blocks (concatenated physical blocks) check the cleanmarker
+ * only in the first page of the first physical block, but scan for bad
+ * blocks in all physical blocks.
+ */
+int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_unknown_node n;
+	unsigned char buf[2 * NAND_MAX_OOBSIZE];
+	unsigned char *p;
+	int ret, i, cnt, retval = 0;
+	size_t retlen, offset;
+	int oob_size;
+
+	offset = jeb->offset;
+	oob_size = c->mtd->oobsize;
+
+	/* Loop through the physical blocks */
+	for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
+		/* Check first if the block is bad. */
+		if (c->mtd->block_isbad (c->mtd, offset)) {
+			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
+			return 2;
+		}
+		/*
+		 * We read oob data from pages 0 and 1 of the block.
+		 * Page 0 contains the cleanmarker and badblock info,
+		 * page 1 contains the failure count of this block.
+		 */
+		ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
+
+		if (ret) {
+			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
+			return ret;
+		}
+		if (retlen < (oob_size << 1)) {
+			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB returned short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
+			return -EIO;
+		}
+
+		/* Check cleanmarker only on the first physical block */
+		if (!cnt) {
+			n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
+			n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
+			n.totlen = cpu_to_je32 (8);
+			p = (unsigned char *) &n;
+
+			for (i = 0; i < c->fsdata_len; i++) {
+				if (buf[c->fsdata_pos + i] != p[i]) {
+					retval = 1;
+				}
+			}
+			D1(if (retval == 1) {
+				printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
+				printk(KERN_WARNING "OOB at %08x was ", offset);
+				for (i=0; i < oob_size; i++) {
+					printk("%02x ", buf[i]);
+				}
+				printk("\n");
+			})
+		}
+		offset += c->mtd->erasesize;
+	}
+	return retval;
+}
+
+int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct 	jffs2_unknown_node n;
+	int 	ret;
+	size_t 	retlen;
+
+	n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
+	n.totlen = cpu_to_je32(8);
+
+	ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
+	
+	if (ret) {
+		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+		return ret;
+	}
+	if (retlen != c->fsdata_len) {
+		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * On NAND we try to mark this block bad. If the block has already failed
+ * MAX_ERASE_FAILURES times, we mark it as finally bad.
+ * Don't care about failures here. This block remains on the erase-pending
+ * or badblock list as long as nobody manipulates the flash with
+ * a bootloader or something like that.
+ */
+
+int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
+{
+	int 	ret;
+
+	/* If the failure count is still below the limit, keep the block for now */
+	if (++jeb->bad_count < MAX_ERASE_FAILURES)
+		return 0;
+
+	if (!c->mtd->block_markbad)
+		return 1; /* What else can we do? */
+
+	D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
+	ret = c->mtd->block_markbad(c->mtd, bad_offset);
+	
+	if (ret) {
+		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+		return ret;
+	}
+	return 1;
+}
+
+#define NAND_JFFS2_OOB16_FSDALEN	8
+
+static struct nand_oobinfo jffs2_oobinfo_docecc = {
+	.useecc = MTD_NANDECC_PLACE,
+	.eccbytes = 6,
+	.eccpos = {0,1,2,3,4,5}
+};
+
+
+static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
+{
+	struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
+
+	/* Do this only if the device has an oob area */
+	if (!c->mtd->oobsize)
+		return 0;
+	
+	/* Cleanmarker is out-of-band, so inline size zero */
+	c->cleanmarker_size = 0;
+
+	/* Should we use autoplacement ? */
+	if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
+		D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
+		/* Get the position of the free bytes */
+		if (!oinfo->oobfree[0][1]) {
+			printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
+			return -ENOSPC;
+		}
+		c->fsdata_pos = oinfo->oobfree[0][0];
+		c->fsdata_len = oinfo->oobfree[0][1];
+		if (c->fsdata_len > 8)
+			c->fsdata_len = 8;
+	} else {
+		/* This is just a legacy fallback and should go away soon */
+		switch(c->mtd->ecctype) {
+		case MTD_ECC_RS_DiskOnChip:
+			printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
+			c->oobinfo = &jffs2_oobinfo_docecc;
+			c->fsdata_pos = 6;
+			c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
+			c->badblock_pos = 15;
+			break;
+	
+		default:
+			D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+{
+	int res;
+
+	/* Initialise write buffer */
+	init_rwsem(&c->wbuf_sem);
+	c->wbuf_pagesize = c->mtd->oobblock;
+	c->wbuf_ofs = 0xFFFFFFFF;
+	
+	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+	if (!c->wbuf)
+		return -ENOMEM;
+
+	res = jffs2_nand_set_oobinfo(c);
+
+#ifdef BREAKME
+	if (!brokenbuf)
+		brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+	if (!brokenbuf) {
+		kfree(c->wbuf);
+		return -ENOMEM;
+	}
+	memset(brokenbuf, 0xdb, c->wbuf_pagesize);
+#endif
+	return res;
+}
+
+void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
+{
+	kfree(c->wbuf);
+}
+
+#ifdef CONFIG_JFFS2_FS_NOR_ECC
+int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c)
+{
+	/* Cleanmarker is actually larger on the flashes */
+	c->cleanmarker_size = 16;
+
+	/* Initialize write buffer */
+	init_rwsem(&c->wbuf_sem);
+	c->wbuf_pagesize = c->mtd->eccsize;
+	c->wbuf_ofs = 0xFFFFFFFF;
+
+	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+	if (!c->wbuf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c)
+{
+	kfree(c->wbuf);
+}
+#endif
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
new file mode 100644
index 000000000000..80a5db542629
--- /dev/null
+++ b/fs/jffs2/write.c
@@ -0,0 +1,708 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: write.c,v 1.87 2004/11/16 20:36:12 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+#include "compr.h"
+
+
+int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri)
+{
+	struct jffs2_inode_cache *ic;
+
+	ic = jffs2_alloc_inode_cache();
+	if (!ic) {
+		return -ENOMEM;
+	}
+
+	memset(ic, 0, sizeof(*ic));
+
+	f->inocache = ic;
+	f->inocache->nlink = 1;
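+	/* An empty node list is represented by a pointer back to the
+	   inode_cache itself; next_in_ino chains are terminated the same way */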
+	f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
+	f->inocache->ino = ++c->highest_ino;
+	f->inocache->state = INO_STATE_PRESENT;
+
+	ri->ino = cpu_to_je32(f->inocache->ino);
+
+	D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino));
+	jffs2_add_ino_cache(c, f->inocache);
+
+	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+	ri->totlen = cpu_to_je32(PAD(sizeof(*ri)));
+	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+	ri->mode = cpu_to_jemode(mode);
+
+	f->highest_version = 1;
+	ri->version = cpu_to_je32(f->highest_version);
+
+	return 0;
+}
+
+#if CONFIG_JFFS2_FS_DEBUG > 0
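+/* Debug aid: read back the 16 bytes we are about to write and complain
+   if they are not still erased (all 0xFF) */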
+static void writecheck(struct jffs2_sb_info *c, uint32_t ofs)
+{
+	unsigned char buf[16];
+	size_t retlen;
+	int ret, i;
+
+	ret = jffs2_flash_read(c, ofs, 16, &retlen, buf);
+	if (ret || (retlen != 16)) {
+		D1(printk(KERN_DEBUG "read failed or short in writecheck(). ret %d, retlen %zd\n", ret, retlen));
+		return;
+	}
+	ret = 0;
+	for (i=0; i<16; i++) {
+		if (buf[i] != 0xff)
+			ret = 1;
+	}
+	if (ret) {
+		printk(KERN_WARNING "ARGH. About to write node to 0x%08x on flash, but there are data already there:\n", ofs);
+		printk(KERN_WARNING "0x%08x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", 
+		       ofs,
+		       buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
+		       buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]);
+	}
+}
+#endif
+
+
+/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, 
+   write it to the flash, link it into the existing inode/fragment list */
+
+struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dnode *fn;
+	size_t retlen;
+	struct kvec vecs[2];
+	int ret;
+	int retried = 0;
+	unsigned long cnt = 2;
+
+	D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
+		printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n");
+		BUG();
+	}
+	   );
+	vecs[0].iov_base = ri;
+	vecs[0].iov_len = sizeof(*ri);
+	vecs[1].iov_base = (unsigned char *)data;
+	vecs[1].iov_len = datalen;
+
+	D1(writecheck(c, flash_ofs));
+
+	if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
+		printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
+	}
+	raw = jffs2_alloc_raw_node_ref();
+	if (!raw)
+		return ERR_PTR(-ENOMEM);
+	
+	fn = jffs2_alloc_full_dnode();
+	if (!fn) {
+		jffs2_free_raw_node_ref(raw);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fn->ofs = je32_to_cpu(ri->offset);
+	fn->size = je32_to_cpu(ri->dsize);
+	fn->frags = 0;
+
+	/* check number of valid vecs */
+	if (!datalen || !data)
+		cnt = 1;
+ retry:
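+	/* On the retry path, raw is a freshly allocated node ref and
+	   flash_ofs points at the newly reserved space */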
+	fn->raw = raw;
+
+	raw->flash_offset = flash_ofs;
+	raw->__totlen = PAD(sizeof(*ri)+datalen);
+	raw->next_phys = NULL;
+
+	ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen,
+				 (alloc_mode==ALLOC_GC)?0:f->inocache->ino);
+
+	if (ret || (retlen != sizeof(*ri) + datalen)) {
+		printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", 
+		       sizeof(*ri)+datalen, flash_ofs, ret, retlen);
+
+		/* Mark the space as dirtied */
+		if (retlen) {
+			/* Doesn't belong to any inode */
+			raw->next_in_ino = NULL;
+
+			/* Don't change raw->__totlen to match retlen. We may have
+			   written the node header already, and only the data will
+			   seem corrupted, in which case the scan would skip over
+			   any node we write before the original intended end of 
+			   this node */
+			raw->flash_offset |= REF_OBSOLETE;
+			jffs2_add_physical_node_ref(c, raw);
+			jffs2_mark_node_obsolete(c, raw);
+		} else {
+			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
+			jffs2_free_raw_node_ref(raw);
+		}
+		if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) {
+			/* Try to reallocate space and retry */
+			uint32_t dummy;
+			struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
+
+			retried = 1;
+
+			D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+			
+			ACCT_SANITY_CHECK(c,jeb);
+			D1(ACCT_PARANOIA_CHECK(jeb));
+
+			if (alloc_mode == ALLOC_GC) {
+				ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, &dummy);
+			} else {
+				/* Locking pain */
+				up(&f->sem);
+				jffs2_complete_reservation(c);
+			
+				ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, &dummy, alloc_mode);
+				down(&f->sem);
+			}
+
+			if (!ret) {
+				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+
+				ACCT_SANITY_CHECK(c,jeb);
+				D1(ACCT_PARANOIA_CHECK(jeb));
+
+				goto retry;
+			}
+			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_free_raw_node_ref(raw);
+		}
+		/* Release the full_dnode which is now useless, and return */
+		jffs2_free_full_dnode(fn);
+		return ERR_PTR(ret?ret:-EIO);
+	}
+	/* Mark the space used */
+	/* If node covers at least a whole page, or if it starts at the 
+	   beginning of a page and runs to the end of the file, or if 
+	   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. 
+	*/
+	if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
+	    ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
+	      (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) ==  je32_to_cpu(ri->isize)))) {
+		raw->flash_offset |= REF_PRISTINE;
+	} else {
+		raw->flash_offset |= REF_NORMAL;
+	}
+	jffs2_add_physical_node_ref(c, raw);
+
+	/* Link into per-inode list */
+	spin_lock(&c->erase_completion_lock);
+	raw->next_in_ino = f->inocache->nodes;
+	f->inocache->nodes = raw;
+	spin_unlock(&c->erase_completion_lock);
+
+	D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
+		  flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), 
+		  je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
+		  je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));
+
+	if (retried) {
+		ACCT_SANITY_CHECK(c,NULL);
+	}
+
+	return fn;
+}
+
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode)
+{
+	struct jffs2_raw_node_ref *raw;
+	struct jffs2_full_dirent *fd;
+	size_t retlen;
+	struct kvec vecs[2];
+	int retried = 0;
+	int ret;
+
+	D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", 
+		  je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
+		  je32_to_cpu(rd->name_crc)));
+	D1(writecheck(c, flash_ofs));
+
+	D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
+		printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n");
+		BUG();
+	}
+	   );
+
+	vecs[0].iov_base = rd;
+	vecs[0].iov_len = sizeof(*rd);
+	vecs[1].iov_base = (unsigned char *)name;
+	vecs[1].iov_len = namelen;
+	
+	raw = jffs2_alloc_raw_node_ref();
+
+	if (!raw)
+		return ERR_PTR(-ENOMEM);
+
+	fd = jffs2_alloc_full_dirent(namelen+1);
+	if (!fd) {
+		jffs2_free_raw_node_ref(raw);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fd->version = je32_to_cpu(rd->version);
+	fd->ino = je32_to_cpu(rd->ino);
+	fd->nhash = full_name_hash(name, strlen(name));
+	fd->type = rd->type;
+	memcpy(fd->name, name, namelen);
+	fd->name[namelen]=0;
+
+ retry:
+	fd->raw = raw;
+
+	raw->flash_offset = flash_ofs;
+	raw->__totlen = PAD(sizeof(*rd)+namelen);
+	raw->next_phys = NULL;
+
+	ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
+				 (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
+	if (ret || (retlen != sizeof(*rd) + namelen)) {
+		printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", 
+			       sizeof(*rd)+namelen, flash_ofs, ret, retlen);
+		/* Mark the space as dirtied */
+		if (retlen) {
+			raw->next_in_ino = NULL;
+			raw->flash_offset |= REF_OBSOLETE;
+			jffs2_add_physical_node_ref(c, raw);
+			jffs2_mark_node_obsolete(c, raw);
+		} else {
+			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
+			jffs2_free_raw_node_ref(raw);
+		}
+		if (!retried && (raw = jffs2_alloc_raw_node_ref())) {
+			/* Try to reallocate space and retry */
+			uint32_t dummy;
+			struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
+
+			retried = 1;
+
+			D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+
+			ACCT_SANITY_CHECK(c,jeb);
+			D1(ACCT_PARANOIA_CHECK(jeb));
+
+			if (alloc_mode == ALLOC_GC) {
+				ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, &dummy);
+			} else {
+				/* Locking pain */
+				up(&f->sem);
+				jffs2_complete_reservation(c);
+			
+				ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, &dummy, alloc_mode);
+				down(&f->sem);
+			}
+
+			if (!ret) {
+				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+				ACCT_SANITY_CHECK(c,jeb);
+				D1(ACCT_PARANOIA_CHECK(jeb));
+				goto retry;
+			}
+			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+			jffs2_free_raw_node_ref(raw);
+		}
+		/* Release the full_dnode which is now useless, and return */
+		jffs2_free_full_dirent(fd);
+		return ERR_PTR(ret?ret:-EIO);
+	}
+	/* Mark the space used */
+	raw->flash_offset |= REF_PRISTINE;
+	jffs2_add_physical_node_ref(c, raw);
+
+	spin_lock(&c->erase_completion_lock);
+	raw->next_in_ino = f->inocache->nodes;
+	f->inocache->nodes = raw;
+	spin_unlock(&c->erase_completion_lock);
+
+	if (retried) {
+		ACCT_SANITY_CHECK(c,NULL);
+	}
+
+	return fd;
+}
+
+/* The OS-specific code fills in the metadata in the jffs2_raw_inode for us, so that
+   we don't have to go digging in struct inode or its equivalent. It should set:
+   mode, uid, gid, (starting)isize, atime, ctime, mtime */
+int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+			    struct jffs2_raw_inode *ri, unsigned char *buf, 
+			    uint32_t offset, uint32_t writelen, uint32_t *retlen)
+{
+	int ret = 0;
+	uint32_t writtenlen = 0;
+
+       	D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n",
+		  f->inocache->ino, offset, writelen));
+		
+	while(writelen) {
+		struct jffs2_full_dnode *fn;
+		unsigned char *comprbuf = NULL;
+		uint16_t comprtype = JFFS2_COMPR_NONE;
+		uint32_t phys_ofs, alloclen;
+		uint32_t datalen, cdatalen;
+		int retried = 0;
+
+	retry:
+		D2(printk(KERN_DEBUG "jffs2_write_inode_range() loop: 0x%x to write to 0x%x\n", writelen, offset));
+
+		ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL);
+		if (ret) {
+			D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
+			break;
+		}
+		down(&f->sem);
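+		/* datalen: how much of the caller's data goes into this node,
+		   capped at the remainder of the current page. cdatalen: how
+		   much room the reservation leaves for the (compressed) data. */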
+		datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
+		cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
+
+		comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
+
+		ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+		ri->totlen = cpu_to_je32(sizeof(*ri) + cdatalen);
+		ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+		ri->ino = cpu_to_je32(f->inocache->ino);
+		ri->version = cpu_to_je32(++f->highest_version);
+		ri->isize = cpu_to_je32(max(je32_to_cpu(ri->isize), offset + datalen));
+		ri->offset = cpu_to_je32(offset);
+		ri->csize = cpu_to_je32(cdatalen);
+		ri->dsize = cpu_to_je32(datalen);
+		ri->compr = comprtype & 0xff;
+		ri->usercompr = (comprtype >> 8 ) & 0xff;
+		ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+		ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
+
+		fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY);
+
+		jffs2_free_comprbuf(comprbuf, buf);
+
+		if (IS_ERR(fn)) {
+			ret = PTR_ERR(fn);
+			up(&f->sem);
+			jffs2_complete_reservation(c);
+			if (!retried) {
+				/* Write error to be retried */
+				retried = 1;
+				D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n"));
+				goto retry;
+			}
+			break;
+		}
+		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+		if (f->metadata) {
+			jffs2_mark_node_obsolete(c, f->metadata->raw);
+			jffs2_free_full_dnode(f->metadata);
+			f->metadata = NULL;
+		}
+		if (ret) {
+			/* Eep */
+			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in jffs2_write_inode_range(), returned %d\n", ret));
+			jffs2_mark_node_obsolete(c, fn->raw);
+			jffs2_free_full_dnode(fn);
+
+			up(&f->sem);
+			jffs2_complete_reservation(c);
+			break;
+		}
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		if (!datalen) {
+			printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
+			ret = -EIO;
+			break;
+		}
+		D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
+		writtenlen += datalen;
+		offset += datalen;
+		writelen -= datalen;
+		buf += datalen;
+	}
+	*retlen = writtenlen;
+	return ret;
+}
+
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen)
+{
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dnode *fn;
+	struct jffs2_full_dirent *fd;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	/* Try to reserve enough space for both node and dirent. 
+	 * Just the node will do for now, though 
+	 */
+	ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
+	D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
+	if (ret) {
+		up(&f->sem);
+		return ret;
+	}
+
+	ri->data_crc = cpu_to_je32(0);
+	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+
+	fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+
+	D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n",
+		  jemode_to_cpu(ri->mode)));
+
+	if (IS_ERR(fn)) {
+		D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
+		/* Eeek. Wave bye bye */
+		up(&f->sem);
+		jffs2_complete_reservation(c);
+		return PTR_ERR(fn);
+	}
+	/* No data here. Only a metadata node, which will be 
+	   obsoleted by the first data write
+	*/
+	f->metadata = fn;
+
+	up(&f->sem);
+	jffs2_complete_reservation(c);
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+		
+	if (ret) {
+		/* Eep. */
+		D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n"));
+		return ret;
+	}
+
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd) {
+		/* Argh. Now we treat it like a normal delete */
+		jffs2_complete_reservation(c);
+		return -ENOMEM;
+	}
+
+	down(&dir_f->sem);
+
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_f->inocache->ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = ri->ino;
+	rd->mctime = ri->ctime;
+	rd->nsize = namelen;
+	rd->type = DT_REG;
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+
+	jffs2_free_raw_dirent(rd);
+	
+	if (IS_ERR(fd)) {
+		/* dirent failed to write. Delete the inode normally 
+		   as if it were the final unlink() */
+		jffs2_complete_reservation(c);
+		up(&dir_f->sem);
+		return PTR_ERR(fd);
+	}
+
+	/* Link the fd into the inode's list, obsoleting an old
+	   one if necessary. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	jffs2_complete_reservation(c);
+	up(&dir_f->sem);
+
+	return 0;
+}
+
+
+int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
+		    const char *name, int namelen, struct jffs2_inode_info *dead_f)
+{
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dirent *fd;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	if (1 /* alternative branch needs testing */ || 
+	    !jffs2_can_mark_obsolete(c)) {
+		/* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */
+
+		rd = jffs2_alloc_raw_dirent();
+		if (!rd)
+			return -ENOMEM;
+
+		ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION);
+		if (ret) {
+			jffs2_free_raw_dirent(rd);
+			return ret;
+		}
+
+		down(&dir_f->sem);
+
+		/* Build a deletion node */
+		rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+		rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+		rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+		rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+		
+		rd->pino = cpu_to_je32(dir_f->inocache->ino);
+		rd->version = cpu_to_je32(++dir_f->highest_version);
+		rd->ino = cpu_to_je32(0);
+		rd->mctime = cpu_to_je32(get_seconds());
+		rd->nsize = namelen;
+		rd->type = DT_UNKNOWN;
+		rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+		rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+		fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);
+		
+		jffs2_free_raw_dirent(rd);
+
+		if (IS_ERR(fd)) {
+			jffs2_complete_reservation(c);
+			up(&dir_f->sem);
+			return PTR_ERR(fd);
+		}
+
+		/* File it. This will mark the old one obsolete. */
+		jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+		up(&dir_f->sem);
+	} else {
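+		/* We can mark nodes obsolete on the medium, so no deletion
+		   dirent is needed: find the victim dirent in the nhash-sorted
+		   list and mark its node obsolete in place. */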
+		struct jffs2_full_dirent **prev = &dir_f->dents;
+		uint32_t nhash = full_name_hash(name, namelen);
+
+		down(&dir_f->sem);
+
+		while ((*prev) && (*prev)->nhash <= nhash) {
+			if ((*prev)->nhash == nhash && 
+			    !memcmp((*prev)->name, name, namelen) &&
+			    !(*prev)->name[namelen]) {
+				struct jffs2_full_dirent *this = *prev;
+
+				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
+					  this->ino, ref_offset(this->raw)));
+
+				*prev = this->next;
+				jffs2_mark_node_obsolete(c, (this->raw));
+				jffs2_free_full_dirent(this);
+				break;
+			}
+			prev = &((*prev)->next);
+		}
+		up(&dir_f->sem);
+	}
+
+	/* dead_f is NULL if this was a rename not a real unlink */
+	/* Also catch the !f->inocache case, where there was a dirent
+	   pointing to an inode which didn't exist. */
+	if (dead_f && dead_f->inocache) { 
+
+		down(&dead_f->sem);
+
+		while (dead_f->dents) {
+			/* There can be only deleted ones */
+			fd = dead_f->dents;
+			
+			dead_f->dents = fd->next;
+			
+			if (fd->ino) {
+				printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
+				       dead_f->inocache->ino, fd->name, fd->ino);
+			} else {
+				D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", fd->name, dead_f->inocache->ino));
+			}
+			jffs2_mark_node_obsolete(c, fd->raw);
+			jffs2_free_full_dirent(fd);
+		}
+
+		dead_f->inocache->nlink--;
+		/* NB: Caller must set inode nlink if appropriate */
+		up(&dead_f->sem);
+	}
+
+	jffs2_complete_reservation(c);
+
+	return 0;
+}
+
+
+int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen)
+{
+	struct jffs2_raw_dirent *rd;
+	struct jffs2_full_dirent *fd;
+	uint32_t alloclen, phys_ofs;
+	int ret;
+
+	rd = jffs2_alloc_raw_dirent();
+	if (!rd)
+		return -ENOMEM;
+
+	ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+	if (ret) {
+		jffs2_free_raw_dirent(rd);
+		return ret;
+	}
+	
+	down(&dir_f->sem);
+
+	/* Build a dirent node for the new link */
+	rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+	rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+	rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+	rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+	rd->pino = cpu_to_je32(dir_f->inocache->ino);
+	rd->version = cpu_to_je32(++dir_f->highest_version);
+	rd->ino = cpu_to_je32(ino);
+	rd->mctime = cpu_to_je32(get_seconds());
+	rd->nsize = namelen;
+
+	rd->type = type;
+
+	rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+	rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+	fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+	
+	jffs2_free_raw_dirent(rd);
+
+	if (IS_ERR(fd)) {
+		jffs2_complete_reservation(c);
+		up(&dir_f->sem);
+		return PTR_ERR(fd);
+	}
+
+	/* File it. This will mark the old one obsolete. */
+	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+	jffs2_complete_reservation(c);
+	up(&dir_f->sem);
+
+	return 0;
+}
diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c
new file mode 100644
index 000000000000..f079f8388566
--- /dev/null
+++ b/fs/jffs2/writev.c
@@ -0,0 +1,50 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: writev.c,v 1.6 2004/11/16 20:36:12 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+
+/* This ought to be in core MTD code. All registered MTD devices
+   without writev should have this put in place. Bug the MTD
+   maintainer */
+static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs,
+				  unsigned long count, loff_t to, size_t *retlen)
+{
+	unsigned long i;
+	size_t totlen = 0, thislen;
+	int ret = 0;
+
+	for (i=0; i<count; i++) {
+		if (!vecs[i].iov_len)
+			continue;
+		ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
+		totlen += thislen;
+		if (ret || thislen != vecs[i].iov_len)
+			break;
+		to += vecs[i].iov_len;
+	}
+	if (retlen)
+		*retlen = totlen;
+	return ret;
+}
+
+int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
+			      unsigned long count, loff_t to, size_t *retlen)
+{
+	if (c->mtd->writev)
+		return c->mtd->writev(c->mtd, vecs, count, to, retlen);
+	else
+		return mtd_fake_writev(c->mtd, vecs, count, to, retlen);
+}
+