# Makefile for the linux journaling routines.
 #
 
-obj-$(CONFIG_JBD) += jbd.o
+obj-$(CONFIG_JBD2) += jbd2.o
 
-jbd-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
+jbd2-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
 
 
 #include <linux/time.h>
 #include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 
 
        if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
                JBUFFER_TRACE(jh, "remove from checkpoint list");
-               ret = __journal_remove_checkpoint(jh) + 1;
+               ret = __jbd2_journal_remove_checkpoint(jh) + 1;
                jbd_unlock_bh_state(bh);
-               journal_remove_journal_head(bh);
+               jbd2_journal_remove_journal_head(bh);
                BUFFER_TRACE(bh, "release");
                __brelse(bh);
        } else {
 }
 
 /*
- * __log_wait_for_space: wait until there is space in the journal.
+ * __jbd2_log_wait_for_space: wait until there is space in the journal.
  *
  * Called under j_state_lock *only*.  It will be unlocked if we have to wait
  * for a checkpoint to free up some space in the log.
  */
-void __log_wait_for_space(journal_t *journal)
+void __jbd2_log_wait_for_space(journal_t *journal)
 {
        int nblocks;
        assert_spin_locked(&journal->j_state_lock);
 
        nblocks = jbd_space_needed(journal);
-       while (__log_space_left(journal) < nblocks) {
-               if (journal->j_flags & JFS_ABORT)
+       while (__jbd2_log_space_left(journal) < nblocks) {
+               if (journal->j_flags & JBD2_ABORT)
                        return;
                spin_unlock(&journal->j_state_lock);
                mutex_lock(&journal->j_checkpoint_mutex);
                 */
                spin_lock(&journal->j_state_lock);
                nblocks = jbd_space_needed(journal);
-               if (__log_space_left(journal) < nblocks) {
+               if (__jbd2_log_space_left(journal) < nblocks) {
                        spin_unlock(&journal->j_state_lock);
-                       log_do_checkpoint(journal);
+                       jbd2_log_do_checkpoint(journal);
                        spin_lock(&journal->j_state_lock);
                }
                mutex_unlock(&journal->j_checkpoint_mutex);
                 * Now in whatever state the buffer currently is, we know that
                 * it has been written out and so we can drop it from the list
                 */
-               released = __journal_remove_checkpoint(jh);
+               released = __jbd2_journal_remove_checkpoint(jh);
                jbd_unlock_bh_state(bh);
-               journal_remove_journal_head(bh);
+               jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
        }
 }
 
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
-               log_start_commit(journal, tid);
-               log_wait_commit(journal, tid);
+               jbd2_log_start_commit(journal, tid);
+               jbd2_log_wait_commit(journal, tid);
                ret = 1;
        } else if (!buffer_dirty(bh)) {
                J_ASSERT_JH(jh, !buffer_jbddirty(bh));
                BUFFER_TRACE(bh, "remove from checkpoint");
-               __journal_remove_checkpoint(jh);
+               __jbd2_journal_remove_checkpoint(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
-               journal_remove_journal_head(bh);
+               jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
                ret = 1;
        } else {
  *
  * The journal should be locked before calling this function.
  */
-int log_do_checkpoint(journal_t *journal)
+int jbd2_log_do_checkpoint(journal_t *journal)
 {
        transaction_t *transaction;
        tid_t this_tid;
         * don't need checkpointing, just eliminate them from the
         * journal straight away.
         */
-       result = cleanup_journal_tail(journal);
+       result = jbd2_cleanup_journal_tail(journal);
        jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
        if (result <= 0)
                return result;
        }
 out:
        spin_unlock(&journal->j_list_lock);
-       result = cleanup_journal_tail(journal);
+       result = jbd2_cleanup_journal_tail(journal);
        if (result < 0)
                return result;
        return 0;
  * we have an abort error outstanding.
  */
 
-int cleanup_journal_tail(journal_t *journal)
+int jbd2_cleanup_journal_tail(journal_t *journal)
 {
        transaction_t * transaction;
        tid_t           first_tid;
        journal->j_tail_sequence = first_tid;
        journal->j_tail = blocknr;
        spin_unlock(&journal->j_state_lock);
-       if (!(journal->j_flags & JFS_ABORT))
-               journal_update_superblock(journal, 1);
+       if (!(journal->j_flags & JBD2_ABORT))
+               jbd2_journal_update_superblock(journal, 1);
        return 0;
 }
 
  * Returns number of buffers reaped (for debug)
  */
 
-int __journal_clean_checkpoint_list(journal_t *journal)
+int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
 {
        transaction_t *transaction, *last_transaction, *next_transaction;
        int ret = 0;
  * This function is called with jbd_lock_bh_state(jh2bh(jh))
  */
 
-int __journal_remove_checkpoint(struct journal_head *jh)
+int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
 {
        transaction_t *transaction;
        journal_t *journal;
         * dropped!
         *
         * The locking here around j_committing_transaction is a bit sleazy.
-        * See the comment at the end of journal_commit_transaction().
+        * See the comment at the end of jbd2_journal_commit_transaction().
         */
        if (transaction == journal->j_committing_transaction) {
                JBUFFER_TRACE(jh, "belongs to committing transaction");
        /* OK, that was the last buffer for the transaction: we can now
           safely remove this transaction from the log */
 
-       __journal_drop_transaction(journal, transaction);
+       __jbd2_journal_drop_transaction(journal, transaction);
 
        /* Just in case anybody was waiting for more transactions to be
            checkpointed... */
  * Called with the journal locked.
  * Called with j_list_lock held.
  */
-void __journal_insert_checkpoint(struct journal_head *jh,
+void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
                               transaction_t *transaction)
 {
        JBUFFER_TRACE(jh, "entry");
  * Called with j_list_lock held.
  */
 
-void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
+void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
 {
        assert_spin_locked(&journal->j_list_lock);
        if (transaction->t_cpnext) {
 
 /*
- * linux/fs/jbd/commit.c
+ * linux/fs/jbd2/commit.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
  *
 
 #include <linux/time.h>
 #include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
        if (is_journal_aborted(journal))
                return 0;
 
-       descriptor = journal_get_descriptor_buffer(journal);
+       descriptor = jbd2_journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;
 
        /* AKPM: buglet - add `i' to tmp! */
        for (i = 0; i < bh->b_size; i += 512) {
                journal_header_t *tmp = (journal_header_t*)bh->b_data;
-               tmp->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
-               tmp->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
+               tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
+               tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
                tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
        }
 
        JBUFFER_TRACE(descriptor, "write commit block");
        set_buffer_dirty(bh);
-       if (journal->j_flags & JFS_BARRIER) {
+       if (journal->j_flags & JBD2_BARRIER) {
                set_buffer_ordered(bh);
                barrier_done = 1;
        }
                        "disabling barriers\n",
                        bdevname(journal->j_dev, b));
                spin_lock(&journal->j_state_lock);
-               journal->j_flags &= ~JFS_BARRIER;
+               journal->j_flags &= ~JBD2_BARRIER;
                spin_unlock(&journal->j_state_lock);
 
                /* And try again, without the barrier */
                ret = sync_dirty_buffer(bh);
        }
        put_bh(bh);             /* One for getblk() */
-       journal_put_journal_head(descriptor);
+       jbd2_journal_put_journal_head(descriptor);
 
        return (ret == -EIO);
 }
                if (locked && test_clear_buffer_dirty(bh)) {
                        BUFFER_TRACE(bh, "needs writeout, adding to array");
                        wbuf[bufs++] = bh;
-                       __journal_file_buffer(jh, commit_transaction,
+                       __jbd2_journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        if (bufs == journal->j_wbufsize) {
                }
                else {
                        BUFFER_TRACE(bh, "writeout complete: unfile");
-                       __journal_unfile_buffer(jh);
+                       __jbd2_journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
-                       journal_remove_journal_head(bh);
+                       jbd2_journal_remove_journal_head(bh);
                        /* Once for our safety reference, once for
-                        * journal_remove_journal_head() */
+                        * jbd2_journal_remove_journal_head() */
                        put_bh(bh);
                        put_bh(bh);
                }
 }
 
 /*
- * journal_commit_transaction
+ * jbd2_journal_commit_transaction
  *
  * The primary function for committing a transaction to the log.  This
  * function is called by the journal thread to begin a complete commit.
  */
-void journal_commit_transaction(journal_t *journal)
+void jbd2_journal_commit_transaction(journal_t *journal)
 {
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        spin_unlock(&journal->j_list_lock);
 #endif
 
-       /* Do we need to erase the effects of a prior journal_flush? */
-       if (journal->j_flags & JFS_FLUSHED) {
+       /* Do we need to erase the effects of a prior jbd2_journal_flush? */
+       if (journal->j_flags & JBD2_FLUSHED) {
                jbd_debug(3, "super block updated\n");
-               journal_update_superblock(journal, 1);
+               jbd2_journal_update_superblock(journal, 1);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
-        * transactions, then it may try to do a journal_restart() while
+        * transactions, then it may try to do a jbd2_journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
-        * that multiple journal_get_write_access() calls to the same
+        * that multiple jbd2_journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
-                * A journal_get_undo_access()+journal_release_buffer() may
+                * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);
 
                        jbd_lock_bh_state(bh);
-                       jbd_slab_free(jh->b_committed_data, bh->b_size);
+                       jbd2_slab_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
-               journal_refile_buffer(journal, jh);
+               jbd2_journal_refile_buffer(journal, jh);
        }
 
        /*
         * frees some memory
         */
        spin_lock(&journal->j_list_lock);
-       __journal_clean_checkpoint_list(journal);
+       __jbd2_journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);
 
        jbd_debug (3, "JBD: commit phase 1\n");
        /*
         * Switch to a new revoke table.
         */
-       journal_switch_revoke_table(journal);
+       jbd2_journal_switch_revoke_table(journal);
 
        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
                        continue;
                }
                if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
-                       __journal_unfile_buffer(jh);
+                       __jbd2_journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
-                       journal_remove_journal_head(bh);
+                       jbd2_journal_remove_journal_head(bh);
                        put_bh(bh);
                } else {
                        jbd_unlock_bh_state(bh);
        spin_unlock(&journal->j_list_lock);
 
        if (err)
-               __journal_abort_hard(journal);
+               __jbd2_journal_abort_hard(journal);
 
-       journal_write_revoke_records(journal, commit_transaction);
+       jbd2_journal_write_revoke_records(journal, commit_transaction);
 
        jbd_debug(3, "JBD: commit phase 2\n");
 
 
                if (is_journal_aborted(journal)) {
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
-                       journal_refile_buffer(journal, jh);
+                       jbd2_journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
 
                        jbd_debug(4, "JBD: get descriptor\n");
 
-                       descriptor = journal_get_descriptor_buffer(journal);
+                       descriptor = jbd2_journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
-                               __journal_abort_hard(journal);
+                               __jbd2_journal_abort_hard(journal);
                                continue;
                        }
 
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
-                       header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-                       header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
+                       header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
+                       header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
                        header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
 
                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        /* Record it so that we can wait for IO
                            completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
-                       journal_file_buffer(descriptor, commit_transaction,
+                       jbd2_journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }
 
                /* Where is the buffer to be written? */
 
-               err = journal_next_log_block(journal, &blocknr);
+               err = jbd2_journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
-                       __journal_abort_hard(journal);
+                       __jbd2_journal_abort_hard(journal);
                        continue;
                }
 
                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
-                * by journal_next_log_block() also.
+                * by jbd2_journal_next_log_block() also.
                 */
                commit_transaction->t_outstanding_credits--;
 
 
                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
-                * akpm: journal_write_metadata_buffer() sets
+                * akpm: jbd2_journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
-               flags = journal_write_metadata_buffer(commit_transaction,
+               flags = jbd2_journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
                wbuf[bufs++] = jh2bh(new_jh);
 
                tag_flag = 0;
                if (flags & 1)
-                       tag_flag |= JFS_FLAG_ESCAPE;
+                       tag_flag |= JBD2_FLAG_ESCAPE;
                if (!first_tag)
-                       tag_flag |= JFS_FLAG_SAME_UUID;
+                       tag_flag |= JBD2_FLAG_SAME_UUID;
 
                tag = (journal_block_tag_t *) tagp;
                tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
                            submitting the IOs.  "tag" still points to
                            the last tag we set up. */
 
-                       tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
+                       tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
 
 start_journal_io:
                        for (i = 0; i < bufs; i++) {
                clear_buffer_jwrite(bh);
 
                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
-               journal_unfile_buffer(journal, jh);
+               jbd2_journal_unfile_buffer(journal, jh);
 
                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
-                * which were created by journal_write_metadata_buffer().
+                * which were created by jbd2_journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
-               journal_put_journal_head(jh);
+               jbd2_journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);
                    we finally commit, we can do any checkpointing
                    required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
-               journal_file_buffer(jh, commit_transaction, BJ_Forget);
+               jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /* Wake up any transactions which were waiting for this
                   IO to complete */
                wake_up_bit(&bh->b_state, BH_Unshadow);
 
                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
-               journal_unfile_buffer(journal, jh);
-               journal_put_journal_head(jh);
+               jbd2_journal_unfile_buffer(journal, jh);
+               jbd2_journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }
                err = -EIO;
 
        if (err)
-               __journal_abort_hard(journal);
+               __jbd2_journal_abort_hard(journal);
 
        /* End of a transaction!  Finally, we can do checkpoint
            processing: any buffers committed as a result of this
                 * Otherwise, we can just throw away the frozen data now.
                 */
                if (jh->b_committed_data) {
-                       jbd_slab_free(jh->b_committed_data, bh->b_size);
+                       jbd2_slab_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                        }
                } else if (jh->b_frozen_data) {
-                       jbd_slab_free(jh->b_frozen_data, bh->b_size);
+                       jbd2_slab_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                }
 
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
-                       __journal_remove_checkpoint(jh);
+                       __jbd2_journal_remove_checkpoint(jh);
                }
 
                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
-                * by journal_forget, it may no longer be dirty and
+                * by jbd2_journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */
 
 
                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
-                       __journal_insert_checkpoint(jh, commit_transaction);
+                       __jbd2_journal_insert_checkpoint(jh, commit_transaction);
                        JBUFFER_TRACE(jh, "refile for checkpoint writeback");
-                       __journal_refile_buffer(jh);
+                       __jbd2_journal_refile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                         * disk and before we process the buffer on BJ_Forget
                         * list. */
                        JBUFFER_TRACE(jh, "refile or unfile freed buffer");
-                       __journal_refile_buffer(jh);
+                       __jbd2_journal_refile_buffer(jh);
                        if (!jh->b_transaction) {
                                jbd_unlock_bh_state(bh);
                                 /* needs a brelse */
-                               journal_remove_journal_head(bh);
+                               jbd2_journal_remove_journal_head(bh);
                                release_buffer_page(bh);
                        } else
                                jbd_unlock_bh_state(bh);
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We borrow j_list_lock to protect
-        * journal->j_committing_transaction in __journal_remove_checkpoint.
-        * Really, __journal_remove_checkpoint should be using j_state_lock but
-        * it's a bit hassle to hold that across __journal_remove_checkpoint
+        * journal->j_committing_transaction in __jbd2_journal_remove_checkpoint.
+        * Really, __jbd2_journal_remove_checkpoint should be using j_state_lock but
+        * it's a bit hassle to hold that across __jbd2_journal_remove_checkpoint
         */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        spin_unlock(&journal->j_state_lock);
 
        if (commit_transaction->t_checkpoint_list == NULL) {
-               __journal_drop_transaction(journal, commit_transaction);
+               __jbd2_journal_drop_transaction(journal, commit_transaction);
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
 
 /*
- * linux/fs/jbd/journal.c
+ * linux/fs/jbd2/journal.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
  *
 #include <linux/module.h>
 #include <linux/time.h>
 #include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
 
-EXPORT_SYMBOL(journal_start);
-EXPORT_SYMBOL(journal_restart);
-EXPORT_SYMBOL(journal_extend);
-EXPORT_SYMBOL(journal_stop);
-EXPORT_SYMBOL(journal_lock_updates);
-EXPORT_SYMBOL(journal_unlock_updates);
-EXPORT_SYMBOL(journal_get_write_access);
-EXPORT_SYMBOL(journal_get_create_access);
-EXPORT_SYMBOL(journal_get_undo_access);
-EXPORT_SYMBOL(journal_dirty_data);
-EXPORT_SYMBOL(journal_dirty_metadata);
-EXPORT_SYMBOL(journal_release_buffer);
-EXPORT_SYMBOL(journal_forget);
+EXPORT_SYMBOL(jbd2_journal_start);
+EXPORT_SYMBOL(jbd2_journal_restart);
+EXPORT_SYMBOL(jbd2_journal_extend);
+EXPORT_SYMBOL(jbd2_journal_stop);
+EXPORT_SYMBOL(jbd2_journal_lock_updates);
+EXPORT_SYMBOL(jbd2_journal_unlock_updates);
+EXPORT_SYMBOL(jbd2_journal_get_write_access);
+EXPORT_SYMBOL(jbd2_journal_get_create_access);
+EXPORT_SYMBOL(jbd2_journal_get_undo_access);
+EXPORT_SYMBOL(jbd2_journal_dirty_data);
+EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
+EXPORT_SYMBOL(jbd2_journal_release_buffer);
+EXPORT_SYMBOL(jbd2_journal_forget);
 #if 0
 EXPORT_SYMBOL(journal_sync_buffer);
 #endif
-EXPORT_SYMBOL(journal_flush);
-EXPORT_SYMBOL(journal_revoke);
-
-EXPORT_SYMBOL(journal_init_dev);
-EXPORT_SYMBOL(journal_init_inode);
-EXPORT_SYMBOL(journal_update_format);
-EXPORT_SYMBOL(journal_check_used_features);
-EXPORT_SYMBOL(journal_check_available_features);
-EXPORT_SYMBOL(journal_set_features);
-EXPORT_SYMBOL(journal_create);
-EXPORT_SYMBOL(journal_load);
-EXPORT_SYMBOL(journal_destroy);
-EXPORT_SYMBOL(journal_update_superblock);
-EXPORT_SYMBOL(journal_abort);
-EXPORT_SYMBOL(journal_errno);
-EXPORT_SYMBOL(journal_ack_err);
-EXPORT_SYMBOL(journal_clear_err);
-EXPORT_SYMBOL(log_wait_commit);
-EXPORT_SYMBOL(journal_start_commit);
-EXPORT_SYMBOL(journal_force_commit_nested);
-EXPORT_SYMBOL(journal_wipe);
-EXPORT_SYMBOL(journal_blocks_per_page);
-EXPORT_SYMBOL(journal_invalidatepage);
-EXPORT_SYMBOL(journal_try_to_free_buffers);
-EXPORT_SYMBOL(journal_force_commit);
+EXPORT_SYMBOL(jbd2_journal_flush);
+EXPORT_SYMBOL(jbd2_journal_revoke);
+
+EXPORT_SYMBOL(jbd2_journal_init_dev);
+EXPORT_SYMBOL(jbd2_journal_init_inode);
+EXPORT_SYMBOL(jbd2_journal_update_format);
+EXPORT_SYMBOL(jbd2_journal_check_used_features);
+EXPORT_SYMBOL(jbd2_journal_check_available_features);
+EXPORT_SYMBOL(jbd2_journal_set_features);
+EXPORT_SYMBOL(jbd2_journal_create);
+EXPORT_SYMBOL(jbd2_journal_load);
+EXPORT_SYMBOL(jbd2_journal_destroy);
+EXPORT_SYMBOL(jbd2_journal_update_superblock);
+EXPORT_SYMBOL(jbd2_journal_abort);
+EXPORT_SYMBOL(jbd2_journal_errno);
+EXPORT_SYMBOL(jbd2_journal_ack_err);
+EXPORT_SYMBOL(jbd2_journal_clear_err);
+EXPORT_SYMBOL(jbd2_log_wait_commit);
+EXPORT_SYMBOL(jbd2_journal_start_commit);
+EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
+EXPORT_SYMBOL(jbd2_journal_wipe);
+EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
+EXPORT_SYMBOL(jbd2_journal_invalidatepage);
+EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
+EXPORT_SYMBOL(jbd2_journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int journal_create_jbd_slab(size_t slab_size);
+static int jbd2_journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
 }
 
 /*
- * kjournald: The main thread function used to manage a logging device
+ * kjournald2: The main thread function used to manage a logging device
  * journal.
  *
  * This kernel thread is responsible for two things:
  *    known as checkpointing, and this thread is responsible for that job.
  */
 
-static int kjournald(void *arg)
+static int kjournald2(void *arg)
 {
        journal_t *journal = arg;
        transaction_t *transaction;
        journal->j_task = current;
        wake_up(&journal->j_wait_done_commit);
 
-       printk(KERN_INFO "kjournald starting.  Commit interval %ld seconds\n",
+       printk(KERN_INFO "kjournald2 starting.  Commit interval %ld seconds\n",
                        journal->j_commit_interval / HZ);
 
        /*
        spin_lock(&journal->j_state_lock);
 
 loop:
-       if (journal->j_flags & JFS_UNMOUNT)
+       if (journal->j_flags & JBD2_UNMOUNT)
                goto end_loop;
 
        jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
                jbd_debug(1, "OK, requests differ\n");
                spin_unlock(&journal->j_state_lock);
                del_timer_sync(&journal->j_commit_timer);
-               journal_commit_transaction(journal);
+               jbd2_journal_commit_transaction(journal);
                spin_lock(&journal->j_state_lock);
                goto loop;
        }
                 * good idea, because that depends on threads that may
                 * be already stopped.
                 */
-               jbd_debug(1, "Now suspending kjournald\n");
+               jbd_debug(1, "Now suspending kjournald2\n");
                spin_unlock(&journal->j_state_lock);
                refrigerator();
                spin_lock(&journal->j_state_lock);
                if (transaction && time_after_eq(jiffies,
                                                transaction->t_expires))
                        should_sleep = 0;
-               if (journal->j_flags & JFS_UNMOUNT)
+               if (journal->j_flags & JBD2_UNMOUNT)
                        should_sleep = 0;
                if (should_sleep) {
                        spin_unlock(&journal->j_state_lock);
                finish_wait(&journal->j_wait_commit, &wait);
        }
 
-       jbd_debug(1, "kjournald wakes\n");
+       jbd_debug(1, "kjournald2 wakes\n");
 
        /*
         * Were we woken up by a commit wakeup event?
        return 0;
 }
 
-static void journal_start_thread(journal_t *journal)
+static void jbd2_journal_start_thread(journal_t *journal)
 {
-       kthread_run(kjournald, journal, "kjournald");
+       kthread_run(kjournald2, journal, "kjournald2");
        wait_event(journal->j_wait_done_commit, journal->j_task != 0);
 }
 
 static void journal_kill_thread(journal_t *journal)
 {
        spin_lock(&journal->j_state_lock);
-       journal->j_flags |= JFS_UNMOUNT;
+       journal->j_flags |= JBD2_UNMOUNT;
 
        while (journal->j_task) {
                wake_up(&journal->j_wait_commit);
 }
 
 /*
- * journal_write_metadata_buffer: write a metadata buffer to the journal.
+ * jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
  *
  * Writes a metadata buffer to a given disk block.  The actual IO is not
  * performed but a new buffer_head is constructed which labels the data
  *
  * Any magic-number escaping which needs to be done will cause a
  * copy-out here.  If the buffer happens to start with the
- * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
+ * JBD2_MAGIC_NUMBER, then we can't write it to the log directly: the
  * magic number is only written to the log for descripter blocks.  In
  * this case, we copy the data and replace the first word with 0, and we
  * return a result code which indicates that this buffer needs to be
  * Bit 1 set == buffer copy-out performed (kfree the data after IO)
  */
 
-int journal_write_metadata_buffer(transaction_t *transaction,
+int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
                                  struct journal_head  *jh_in,
                                  struct journal_head **jh_out,
                                  unsigned long blocknr)
         * Check for escaping
         */
        if (*((__be32 *)(mapped_data + new_offset)) ==
-                               cpu_to_be32(JFS_MAGIC_NUMBER)) {
+                               cpu_to_be32(JBD2_MAGIC_NUMBER)) {
                need_copy_out = 1;
                do_escape = 1;
        }
                char *tmp;
 
                jbd_unlock_bh_state(bh_in);
-               tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
+               tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
                jbd_lock_bh_state(bh_in);
                if (jh_in->b_frozen_data) {
-                       jbd_slab_free(tmp, bh_in->b_size);
+                       jbd2_slab_free(tmp, bh_in->b_size);
                        goto repeat;
                }
 
        atomic_set(&new_bh->b_count, 1);
        jbd_unlock_bh_state(bh_in);
 
-       new_jh = journal_add_journal_head(new_bh);      /* This sleeps */
+       new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
 
        set_bh_page(new_bh, new_page, new_offset);
        new_jh->b_transaction = NULL;
         * copying is moved to the transaction's shadow queue.
         */
        JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-       journal_file_buffer(jh_in, transaction, BJ_Shadow);
+       jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
        JBUFFER_TRACE(new_jh, "file as BJ_IO");
-       journal_file_buffer(new_jh, transaction, BJ_IO);
+       jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
 
        return do_escape | (done_copy_out << 1);
 }
  */
 
 /*
- * __log_space_left: Return the number of free blocks left in the journal.
+ * __jbd2_log_space_left: Return the number of free blocks left in the journal.
  *
  * Called with the journal already locked.
  *
  * Called under j_state_lock
  */
 
-int __log_space_left(journal_t *journal)
+int __jbd2_log_space_left(journal_t *journal)
 {
        int left = journal->j_free;
 
 /*
  * Called under j_state_lock.  Returns true if a transaction was started.
  */
-int __log_start_commit(journal_t *journal, tid_t target)
+int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 {
        /*
         * Are we already doing a recent enough commit?
        return 0;
 }
 
-int log_start_commit(journal_t *journal, tid_t tid)
+int jbd2_log_start_commit(journal_t *journal, tid_t tid)
 {
        int ret;
 
        spin_lock(&journal->j_state_lock);
-       ret = __log_start_commit(journal, tid);
+       ret = __jbd2_log_start_commit(journal, tid);
        spin_unlock(&journal->j_state_lock);
        return ret;
 }
  *
  * Returns true if a transaction was started.
  */
-int journal_force_commit_nested(journal_t *journal)
+int jbd2_journal_force_commit_nested(journal_t *journal)
 {
        transaction_t *transaction = NULL;
        tid_t tid;
        spin_lock(&journal->j_state_lock);
        if (journal->j_running_transaction && !current->journal_info) {
                transaction = journal->j_running_transaction;
-               __log_start_commit(journal, transaction->t_tid);
+               __jbd2_log_start_commit(journal, transaction->t_tid);
        } else if (journal->j_committing_transaction)
                transaction = journal->j_committing_transaction;
 
 
        tid = transaction->t_tid;
        spin_unlock(&journal->j_state_lock);
-       log_wait_commit(journal, tid);
+       jbd2_log_wait_commit(journal, tid);
        return 1;
 }
 
  * Start a commit of the current running transaction (if any).  Returns true
  * if a transaction was started, and fills its tid in at *ptid
  */
-int journal_start_commit(journal_t *journal, tid_t *ptid)
+int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
 {
        int ret = 0;
 
        if (journal->j_running_transaction) {
                tid_t tid = journal->j_running_transaction->t_tid;
 
-               ret = __log_start_commit(journal, tid);
+               ret = __jbd2_log_start_commit(journal, tid);
                if (ret && ptid)
                        *ptid = tid;
        } else if (journal->j_committing_transaction && ptid) {
  * Wait for a specified commit to complete.
  * The caller may not hold the journal lock.
  */
-int log_wait_commit(journal_t *journal, tid_t tid)
+int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
 {
        int err = 0;
 
  * Log buffer allocation routines:
  */
 
-int journal_next_log_block(journal_t *journal, unsigned long *retp)
+int jbd2_journal_next_log_block(journal_t *journal, unsigned long *retp)
 {
        unsigned long blocknr;
 
        if (journal->j_head == journal->j_last)
                journal->j_head = journal->j_first;
        spin_unlock(&journal->j_state_lock);
-       return journal_bmap(journal, blocknr, retp);
+       return jbd2_journal_bmap(journal, blocknr, retp);
 }
 
 /*
  * this is a no-op.  If needed, we can use j_blk_offset - everything is
  * ready.
  */
-int journal_bmap(journal_t *journal, unsigned long blocknr,
+int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
                 unsigned long *retp)
 {
        int err = 0;
  * the journal without copying their contents, but for journal
  * descriptor blocks we do need to generate bona fide buffers.
  *
- * After the caller of journal_get_descriptor_buffer() has finished modifying
+ * After the caller of jbd2_journal_get_descriptor_buffer() has finished modifying
  * the buffer's contents they really should run flush_dcache_page(bh->b_page).
  * But we don't bother doing that, so there will be coherency problems with
  * mmaps of blockdevs which hold live JBD-controlled filesystems.
  */
-struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
+struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
 {
        struct buffer_head *bh;
        unsigned long blocknr;
        int err;
 
-       err = journal_next_log_block(journal, &blocknr);
+       err = jbd2_journal_next_log_block(journal, &blocknr);
 
        if (err)
                return NULL;
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        BUFFER_TRACE(bh, "return this buffer");
-       return journal_add_journal_head(bh);
+       return jbd2_journal_add_journal_head(bh);
 }
 
 /*
        journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
 
        /* The journal is marked for error until we succeed with recovery! */
-       journal->j_flags = JFS_ABORT;
+       journal->j_flags = JBD2_ABORT;
 
        /* Set up a default-sized revoke table for the new mount. */
-       err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
+       err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
        if (err) {
                kfree(journal);
                goto fail;
        return NULL;
 }
 
-/* journal_init_dev and journal_init_inode:
+/* jbd2_journal_init_dev and jbd2_journal_init_inode:
  *
  * Create a journal structure assigned some fixed set of disk blocks to
  * the journal.  We don't actually touch those disk blocks yet, but we
  */
 
 /**
- *  journal_t * journal_init_dev() - creates an initialises a journal structure
+ *  journal_t * jbd2_journal_init_dev() - creates and initialises a journal structure
  *  @bdev: Block device on which to create the journal
  *  @fs_dev: Device which hold journalled filesystem for this journal.
  *  @start: Block nr Start of journal.
  *  @blocksize: blocksize of journalling device
  *  @returns: a newly created journal_t *
  *
- *  journal_init_dev creates a journal which maps a fixed contiguous
+ *  jbd2_journal_init_dev creates a journal which maps a fixed contiguous
  *  range of blocks on an arbitrary block device.
  *
  */
-journal_t * journal_init_dev(struct block_device *bdev,
+journal_t * jbd2_journal_init_dev(struct block_device *bdev,
                        struct block_device *fs_dev,
                        int start, int len, int blocksize)
 {
 }
 
 /**
- *  journal_t * journal_init_inode () - creates a journal which maps to a inode.
+ *  journal_t * jbd2_journal_init_inode () - creates a journal which maps to a inode.
  *  @inode: An inode to create the journal in
  *
- * journal_init_inode creates a journal which maps an on-disk inode as
+ * jbd2_journal_init_inode creates a journal which maps an on-disk inode as
  * the journal.  The inode must exist already, must support bmap() and
  * must have all data blocks preallocated.
  */
-journal_t * journal_init_inode (struct inode *inode)
+journal_t * jbd2_journal_init_inode (struct inode *inode)
 {
        struct buffer_head *bh;
        journal_t *journal = journal_init_common();
                return NULL;
        }
 
-       err = journal_bmap(journal, 0, &blocknr);
+       err = jbd2_journal_bmap(journal, 0, &blocknr);
        /* If that failed, give up */
        if (err) {
                printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
        journal->j_max_transaction_buffers = journal->j_maxlen / 4;
 
        /* Add the dynamic fields and write it to disk. */
-       journal_update_superblock(journal, 1);
-       journal_start_thread(journal);
+       jbd2_journal_update_superblock(journal, 1);
+       jbd2_journal_start_thread(journal);
        return 0;
 }
 
 /**
- * int journal_create() - Initialise the new journal file
+ * int jbd2_journal_create() - Initialise the new journal file
  * @journal: Journal to create. This structure must have been initialised
  *
  * Given a journal_t structure which tells us which disk blocks we can
  * use, create a new journal superblock and initialise all of the
  * journal fields from scratch.
  **/
-int journal_create(journal_t *journal)
+int jbd2_journal_create(journal_t *journal)
 {
        unsigned long blocknr;
        struct buffer_head *bh;
        journal_superblock_t *sb;
        int i, err;
 
-       if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) {
+       if (journal->j_maxlen < JBD2_MIN_JOURNAL_BLOCKS) {
                printk (KERN_ERR "Journal length (%d blocks) too short.\n",
                        journal->j_maxlen);
                journal_fail_superblock(journal);
        }
 
        /* Zero out the entire journal on disk.  We cannot afford to
-          have any blocks on disk beginning with JFS_MAGIC_NUMBER. */
+          have any blocks on disk beginning with JBD2_MAGIC_NUMBER. */
        jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
        for (i = 0; i < journal->j_maxlen; i++) {
-               err = journal_bmap(journal, i, &blocknr);
+               err = jbd2_journal_bmap(journal, i, &blocknr);
                if (err)
                        return err;
                bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
        /* OK, fill in the initial static fields in the new superblock */
        sb = journal->j_superblock;
 
-       sb->s_header.h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-       sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
+       sb->s_header.h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
+       sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
 
        sb->s_blocksize = cpu_to_be32(journal->j_blocksize);
        sb->s_maxlen    = cpu_to_be32(journal->j_maxlen);
 
        journal->j_transaction_sequence = 1;
 
-       journal->j_flags &= ~JFS_ABORT;
+       journal->j_flags &= ~JBD2_ABORT;
        journal->j_format_version = 2;
 
        return journal_reset(journal);
 }
 
 /**
- * void journal_update_superblock() - Update journal sb on disk.
+ * void jbd2_journal_update_superblock() - Update journal sb on disk.
  * @journal: The journal to update.
  * @wait: Set to '0' if you don't want to wait for IO completion.
  *
  * Update a journal's dynamic superblock fields and write it to disk,
  * optionally waiting for the IO to complete.
  */
-void journal_update_superblock(journal_t *journal, int wait)
+void jbd2_journal_update_superblock(journal_t *journal, int wait)
 {
        journal_superblock_t *sb = journal->j_superblock;
        struct buffer_head *bh = journal->j_sb_buffer;
         * As a special case, if the on-disk copy is already marked as needing
         * no recovery (s_start == 0) and there are no outstanding transactions
         * in the filesystem, then we can safely defer the superblock update
-        * until the next commit by setting JFS_FLUSHED.  This avoids
+        * until the next commit by setting JBD2_FLUSHED.  This avoids
         * attempting a write to a potential-readonly device.
         */
        if (sb->s_start == 0 && journal->j_tail_sequence ==
 
        spin_lock(&journal->j_state_lock);
        if (sb->s_start)
-               journal->j_flags &= ~JFS_FLUSHED;
+               journal->j_flags &= ~JBD2_FLUSHED;
        else
-               journal->j_flags |= JFS_FLUSHED;
+               journal->j_flags |= JBD2_FLUSHED;
        spin_unlock(&journal->j_state_lock);
 }
 
 
        err = -EINVAL;
 
-       if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
+       if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) ||
            sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
                printk(KERN_WARNING "JBD: no valid journal superblock found\n");
                goto out;
        }
 
        switch(be32_to_cpu(sb->s_header.h_blocktype)) {
-       case JFS_SUPERBLOCK_V1:
+       case JBD2_SUPERBLOCK_V1:
                journal->j_format_version = 1;
                break;
-       case JFS_SUPERBLOCK_V2:
+       case JBD2_SUPERBLOCK_V2:
                journal->j_format_version = 2;
                break;
        default:
 
 
 /**
- * int journal_load() - Read journal from disk.
+ * int jbd2_journal_load() - Read journal from disk.
  * @journal: Journal to act on.
  *
  * Given a journal_t structure which tells us which disk blocks contain
  * a journal, read the journal from disk to initialise the in-memory
  * structures.
  */
-int journal_load(journal_t *journal)
+int jbd2_journal_load(journal_t *journal)
 {
        int err;
        journal_superblock_t *sb;
 
        if (journal->j_format_version >= 2) {
                if ((sb->s_feature_ro_compat &
-                    ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
+                    ~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) ||
                    (sb->s_feature_incompat &
-                    ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) {
+                    ~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) {
                        printk (KERN_WARNING
                                "JBD: Unrecognised features on journal\n");
                        return -EINVAL;
        /*
         * Create a slab for this blocksize
         */
-       err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
+       err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
        if (err)
                return err;
 
        /* Let the recovery code check whether it needs to recover any
         * data from the journal. */
-       if (journal_recover(journal))
+       if (jbd2_journal_recover(journal))
                goto recovery_error;
 
        /* OK, we've finished with the dynamic journal bits:
        if (journal_reset(journal))
                goto recovery_error;
 
-       journal->j_flags &= ~JFS_ABORT;
-       journal->j_flags |= JFS_LOADED;
+       journal->j_flags &= ~JBD2_ABORT;
+       journal->j_flags |= JBD2_LOADED;
        return 0;
 
 recovery_error:
 }
 
 /**
- * void journal_destroy() - Release a journal_t structure.
+ * void jbd2_journal_destroy() - Release a journal_t structure.
  * @journal: Journal to act on.
  *
  * Release a journal_t structure once it is no longer in use by the
  * journaled object.
  */
-void journal_destroy(journal_t *journal)
+void jbd2_journal_destroy(journal_t *journal)
 {
        /* Wait for the commit thread to wake up and die. */
        journal_kill_thread(journal);
 
        /* Force a final log commit */
        if (journal->j_running_transaction)
-               journal_commit_transaction(journal);
+               jbd2_journal_commit_transaction(journal);
 
        /* Force any old transactions to disk */
 
        spin_lock(&journal->j_list_lock);
        while (journal->j_checkpoint_transactions != NULL) {
                spin_unlock(&journal->j_list_lock);
-               log_do_checkpoint(journal);
+               jbd2_log_do_checkpoint(journal);
                spin_lock(&journal->j_list_lock);
        }
 
        journal->j_tail = 0;
        journal->j_tail_sequence = ++journal->j_transaction_sequence;
        if (journal->j_sb_buffer) {
-               journal_update_superblock(journal, 1);
+               jbd2_journal_update_superblock(journal, 1);
                brelse(journal->j_sb_buffer);
        }
 
        if (journal->j_inode)
                iput(journal->j_inode);
        if (journal->j_revoke)
-               journal_destroy_revoke(journal);
+               jbd2_journal_destroy_revoke(journal);
        kfree(journal->j_wbuf);
        kfree(journal);
 }
 
 
 /**
- *int journal_check_used_features () - Check if features specified are used.
+ * int jbd2_journal_check_used_features () - Check if features specified are used.
  * @journal: Journal to check.
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
  * features.  Return true (non-zero) if it does.
  **/
 
-int journal_check_used_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
                                 unsigned long ro, unsigned long incompat)
 {
        journal_superblock_t *sb;
 }
 
 /**
- * int journal_check_available_features() - Check feature set in journalling layer
+ * int jbd2_journal_check_available_features() - Check feature set in journalling layer
  * @journal: Journal to check.
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
  * all of a given set of features on this journal.  Return true
  * (non-zero) if it can. */
 
-int journal_check_available_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
                                      unsigned long ro, unsigned long incompat)
 {
        journal_superblock_t *sb;
        if (journal->j_format_version != 2)
                return 0;
 
-       if ((compat   & JFS_KNOWN_COMPAT_FEATURES) == compat &&
-           (ro       & JFS_KNOWN_ROCOMPAT_FEATURES) == ro &&
-           (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat)
+       if ((compat   & JBD2_KNOWN_COMPAT_FEATURES) == compat &&
+           (ro       & JBD2_KNOWN_ROCOMPAT_FEATURES) == ro &&
+           (incompat & JBD2_KNOWN_INCOMPAT_FEATURES) == incompat)
                return 1;
 
        return 0;
 }
 
 /**
- * int journal_set_features () - Mark a given journal feature in the superblock
+ * int jbd2_journal_set_features () - Mark a given journal feature in the superblock
  * @journal: Journal to act on.
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
  *
  */
 
-int journal_set_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
                          unsigned long ro, unsigned long incompat)
 {
        journal_superblock_t *sb;
 
-       if (journal_check_used_features(journal, compat, ro, incompat))
+       if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
                return 1;
 
-       if (!journal_check_available_features(journal, compat, ro, incompat))
+       if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
                return 0;
 
        jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
 
 
 /**
- * int journal_update_format () - Update on-disk journal structure.
+ * int jbd2_journal_update_format () - Update on-disk journal structure.
  * @journal: Journal to act on.
  *
  * Given an initialised but unloaded journal struct, poke about in the
  * on-disk structure to update it to the most recent supported version.
  */
-int journal_update_format (journal_t *journal)
+int jbd2_journal_update_format (journal_t *journal)
 {
        journal_superblock_t *sb;
        int err;
        sb = journal->j_superblock;
 
        switch (be32_to_cpu(sb->s_header.h_blocktype)) {
-       case JFS_SUPERBLOCK_V2:
+       case JBD2_SUPERBLOCK_V2:
                return 0;
-       case JFS_SUPERBLOCK_V1:
+       case JBD2_SUPERBLOCK_V1:
                return journal_convert_superblock_v1(journal, sb);
        default:
                break;
        memset(&sb->s_feature_compat, 0, blocksize-offset);
 
        sb->s_nr_users = cpu_to_be32(1);
-       sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
+       sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
        journal->j_format_version = 2;
 
        bh = journal->j_sb_buffer;
 
 
 /**
- * int journal_flush () - Flush journal
+ * int jbd2_journal_flush () - Flush journal
  * @journal: Journal to act on.
  *
  * Flush all data for a given journal to disk and empty the journal.
  * recovery does not need to happen on remount.
  */
 
-int journal_flush(journal_t *journal)
+int jbd2_journal_flush(journal_t *journal)
 {
        int err = 0;
        transaction_t *transaction = NULL;
        /* Force everything buffered to the log... */
        if (journal->j_running_transaction) {
                transaction = journal->j_running_transaction;
-               __log_start_commit(journal, transaction->t_tid);
+               __jbd2_log_start_commit(journal, transaction->t_tid);
        } else if (journal->j_committing_transaction)
                transaction = journal->j_committing_transaction;
 
                tid_t tid = transaction->t_tid;
 
                spin_unlock(&journal->j_state_lock);
-               log_wait_commit(journal, tid);
+               jbd2_log_wait_commit(journal, tid);
        } else {
                spin_unlock(&journal->j_state_lock);
        }
        spin_lock(&journal->j_list_lock);
        while (!err && journal->j_checkpoint_transactions != NULL) {
                spin_unlock(&journal->j_list_lock);
-               err = log_do_checkpoint(journal);
+               err = jbd2_log_do_checkpoint(journal);
                spin_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
-       cleanup_journal_tail(journal);
+       jbd2_cleanup_journal_tail(journal);
 
        /* Finally, mark the journal as really needing no recovery.
         * This sets s_start==0 in the underlying superblock, which is
        old_tail = journal->j_tail;
        journal->j_tail = 0;
        spin_unlock(&journal->j_state_lock);
-       journal_update_superblock(journal, 1);
+       jbd2_journal_update_superblock(journal, 1);
        spin_lock(&journal->j_state_lock);
        journal->j_tail = old_tail;
 
 }
 
 /**
- * int journal_wipe() - Wipe journal contents
+ * int jbd2_journal_wipe() - Wipe journal contents
  * @journal: Journal to act on.
  * @write: flag (see below)
  *
  * Wipe out all of the contents of a journal, safely.  This will produce
  * a warning if the journal contains any valid recovery information.
- * Must be called between journal_init_*() and journal_load().
+ * Must be called between journal_init_*() and jbd2_journal_load().
  *
  * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
  * we merely suppress recovery.
  */
 
-int journal_wipe(journal_t *journal, int write)
+int jbd2_journal_wipe(journal_t *journal, int write)
 {
        journal_superblock_t *sb;
        int err = 0;
 
-       J_ASSERT (!(journal->j_flags & JFS_LOADED));
+       J_ASSERT (!(journal->j_flags & JBD2_LOADED));
 
        err = load_superblock(journal);
        if (err)
        printk (KERN_WARNING "JBD: %s recovery information on journal\n",
                write ? "Clearing" : "Ignoring");
 
-       err = journal_skip_recovery(journal);
+       err = jbd2_journal_skip_recovery(journal);
        if (write)
-               journal_update_superblock(journal, 1);
+               jbd2_journal_update_superblock(journal, 1);
 
  no_recovery:
        return err;
  * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
  * and don't attempt to make any other journal updates.
  */
-void __journal_abort_hard(journal_t *journal)
+void __jbd2_journal_abort_hard(journal_t *journal)
 {
        transaction_t *transaction;
        char b[BDEVNAME_SIZE];
 
-       if (journal->j_flags & JFS_ABORT)
+       if (journal->j_flags & JBD2_ABORT)
                return;
 
        printk(KERN_ERR "Aborting journal on device %s.\n",
                journal_dev_name(journal, b));
 
        spin_lock(&journal->j_state_lock);
-       journal->j_flags |= JFS_ABORT;
+       journal->j_flags |= JBD2_ABORT;
        transaction = journal->j_running_transaction;
        if (transaction)
-               __log_start_commit(journal, transaction->t_tid);
+               __jbd2_log_start_commit(journal, transaction->t_tid);
        spin_unlock(&journal->j_state_lock);
 }
 
  * but don't do any other IO. */
 static void __journal_abort_soft (journal_t *journal, int errno)
 {
-       if (journal->j_flags & JFS_ABORT)
+       if (journal->j_flags & JBD2_ABORT)
                return;
 
        if (!journal->j_errno)
                journal->j_errno = errno;
 
-       __journal_abort_hard(journal);
+       __jbd2_journal_abort_hard(journal);
 
        if (errno)
-               journal_update_superblock(journal, 1);
+               jbd2_journal_update_superblock(journal, 1);
 }
 
 /**
- * void journal_abort () - Shutdown the journal immediately.
+ * void jbd2_journal_abort () - Shutdown the journal immediately.
  * @journal: the journal to shutdown.
  * @errno:   an error number to record in the journal indicating
  *           the reason for the shutdown.
  * journal (not of a single transaction).  This operation cannot be
  * undone without closing and reopening the journal.
  *
- * The journal_abort function is intended to support higher level error
+ * The jbd2_journal_abort function is intended to support higher level error
  * recovery mechanisms such as the ext2/ext3 remount-readonly error
  * mode.
  *
  *
  * Any attempt to get a new transaction handle on a journal which is in
  * ABORT state will just result in an -EROFS error return.  A
- * journal_stop on an existing handle will return -EIO if we have
+ * jbd2_journal_stop on an existing handle will return -EIO if we have
  * entered abort state during the update.
  *
  * Recursive transactions are not disturbed by journal abort until the
- * final journal_stop, which will receive the -EIO error.
+ * final jbd2_journal_stop, which will receive the -EIO error.
  *
- * Finally, the journal_abort call allows the caller to supply an errno
+ * Finally, the jbd2_journal_abort call allows the caller to supply an errno
  * which will be recorded (if possible) in the journal superblock.  This
  * allows a client to record failure conditions in the middle of a
  * transaction without having to complete the transaction to record the
  *
  */
 
-void journal_abort(journal_t *journal, int errno)
+void jbd2_journal_abort(journal_t *journal, int errno)
 {
        __journal_abort_soft(journal, errno);
 }
 
 /**
- * int journal_errno () - returns the journal's error state.
+ * int jbd2_journal_errno () - returns the journal's error state.
  * @journal: journal to examine.
  *
- * This is the errno numbet set with journal_abort(), the last
+ * This is the errno number set with jbd2_journal_abort(), the last
  * time the journal was mounted - if the journal was stopped
  * without calling abort this will be 0.
  *
  * If the journal has been aborted on this mount time -EROFS will
  * be returned.
  */
-int journal_errno(journal_t *journal)
+int jbd2_journal_errno(journal_t *journal)
 {
        int err;
 
        spin_lock(&journal->j_state_lock);
-       if (journal->j_flags & JFS_ABORT)
+       if (journal->j_flags & JBD2_ABORT)
                err = -EROFS;
        else
                err = journal->j_errno;
 }
 
 /**
- * int journal_clear_err () - clears the journal's error state
+ * int jbd2_journal_clear_err () - clears the journal's error state
  * @journal: journal to act on.
  *
  * An error must be cleared or Acked to take a FS out of readonly
  * mode.
  */
-int journal_clear_err(journal_t *journal)
+int jbd2_journal_clear_err(journal_t *journal)
 {
        int err = 0;
 
        spin_lock(&journal->j_state_lock);
-       if (journal->j_flags & JFS_ABORT)
+       if (journal->j_flags & JBD2_ABORT)
                err = -EROFS;
        else
                journal->j_errno = 0;
 }
 
 /**
- * void journal_ack_err() - Ack journal err.
+ * void jbd2_journal_ack_err() - Ack journal err.
  * @journal: journal to act on.
  *
  * An error must be cleared or Acked to take a FS out of readonly
  * mode.
  */
-void journal_ack_err(journal_t *journal)
+void jbd2_journal_ack_err(journal_t *journal)
 {
        spin_lock(&journal->j_state_lock);
        if (journal->j_errno)
-               journal->j_flags |= JFS_ACK_ERR;
+               journal->j_flags |= JBD2_ACK_ERR;
        spin_unlock(&journal->j_state_lock);
 }
 
-int journal_blocks_per_page(struct inode *inode)
+int jbd2_journal_blocks_per_page(struct inode *inode)
 {
        return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 }
  * Simple support for retrying memory allocations.  Introduced to help to
  * debug different VM deadlock avoidance strategies.
  */
-void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
+void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 {
        return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
 }
        "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
 };
 
-static void journal_destroy_jbd_slabs(void)
+static void jbd2_journal_destroy_jbd_slabs(void)
 {
        int i;
 
        }
 }
 
-static int journal_create_jbd_slab(size_t slab_size)
+static int jbd2_journal_create_jbd_slab(size_t slab_size)
 {
        int i = JBD_SLAB_INDEX(slab_size);
 
        return 0;
 }
 
-void * jbd_slab_alloc(size_t size, gfp_t flags)
+void * jbd2_slab_alloc(size_t size, gfp_t flags)
 {
        int idx;
 
        return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
 }
 
-void jbd_slab_free(void *ptr,  size_t size)
+void jbd2_slab_free(void *ptr,  size_t size)
 {
        int idx;
 
 /*
  * Journal_head storage management
  */
-static kmem_cache_t *journal_head_cache;
+static kmem_cache_t *jbd2_journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
 
-static int journal_init_journal_head_cache(void)
+static int journal_init_jbd2_journal_head_cache(void)
 {
        int retval;
 
-       J_ASSERT(journal_head_cache == 0);
-       journal_head_cache = kmem_cache_create("journal_head",
+       J_ASSERT(jbd2_journal_head_cache == 0);
+       jbd2_journal_head_cache = kmem_cache_create("journal_head",
                                sizeof(struct journal_head),
                                0,              /* offset */
                                0,              /* flags */
                                NULL,           /* ctor */
                                NULL);          /* dtor */
        retval = 0;
-       if (journal_head_cache == 0) {
+       if (jbd2_journal_head_cache == 0) {
                retval = -ENOMEM;
                printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
        }
        return retval;
 }
 
-static void journal_destroy_journal_head_cache(void)
+static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
 {
-       J_ASSERT(journal_head_cache != NULL);
-       kmem_cache_destroy(journal_head_cache);
-       journal_head_cache = NULL;
+       J_ASSERT(jbd2_journal_head_cache != NULL);
+       kmem_cache_destroy(jbd2_journal_head_cache);
+       jbd2_journal_head_cache = NULL;
 }
 
 /*
 #ifdef CONFIG_JBD_DEBUG
        atomic_inc(&nr_journal_heads);
 #endif
-       ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
+       ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
        if (ret == 0) {
                jbd_debug(1, "out of memory for journal_head\n");
                if (time_after(jiffies, last_warning + 5*HZ)) {
                }
                while (ret == 0) {
                        yield();
-                       ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
+                       ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
                }
        }
        return ret;
        atomic_dec(&nr_journal_heads);
        memset(jh, JBD_POISON_FREE, sizeof(*jh));
 #endif
-       kmem_cache_free(journal_head_cache, jh);
+       kmem_cache_free(jbd2_journal_head_cache, jh);
 }
 
 /*
  *
  * A journal_head may be detached from its buffer_head when the journal_head's
  * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
- * Various places in JBD call journal_remove_journal_head() to indicate that the
+ * Various places in JBD2 call jbd2_journal_remove_journal_head() to indicate that the
  * journal_head can be dropped if needed.
  *
  * Various places in the kernel want to attach a journal_head to a buffer_head
  * _before_ attaching the journal_head to a transaction.  To protect the
- * journal_head in this situation, journal_add_journal_head elevates the
+ * journal_head in this situation, jbd2_journal_add_journal_head elevates the
  * journal_head's b_jcount refcount by one.  The caller must call
- * journal_put_journal_head() to undo this.
+ * jbd2_journal_put_journal_head() to undo this.
  *
  * So the typical usage would be:
  *
  *     (Attach a journal_head if needed.  Increments b_jcount)
- *     struct journal_head *jh = journal_add_journal_head(bh);
+ *     struct journal_head *jh = jbd2_journal_add_journal_head(bh);
  *     ...
  *     jh->b_transaction = xxx;
- *     journal_put_journal_head(jh);
+ *     jbd2_journal_put_journal_head(jh);
  *
  * Now, the journal_head's b_jcount is zero, but it is safe from being released
  * because it has a non-zero b_transaction.
  * Doesn't need the journal lock.
  * May sleep.
  */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh)
+struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
 {
        struct journal_head *jh;
        struct journal_head *new_jh = NULL;
  * Grab a ref against this buffer_head's journal_head.  If it ended up not
  * having a journal_head, return NULL
  */
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh)
+struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
 {
        struct journal_head *jh = NULL;
 
                                printk(KERN_WARNING "%s: freeing "
                                                "b_frozen_data\n",
                                                __FUNCTION__);
-                               jbd_slab_free(jh->b_frozen_data, bh->b_size);
+                               jbd2_slab_free(jh->b_frozen_data, bh->b_size);
                        }
                        if (jh->b_committed_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_committed_data\n",
                                                __FUNCTION__);
-                               jbd_slab_free(jh->b_committed_data, bh->b_size);
+                               jbd2_slab_free(jh->b_committed_data, bh->b_size);
                        }
                        bh->b_private = NULL;
                        jh->b_bh = NULL;        /* debug, really */
 }
 
 /*
- * journal_remove_journal_head(): if the buffer isn't attached to a transaction
+ * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction
  * and has a zero b_jcount then remove and release its journal_head.   If we did
  * see that the buffer is not used by any transaction we also "logically"
  * decrement ->b_count.
  * We in fact take an additional increment on ->b_count as a convenience,
  * because the caller usually wants to do additional things with the bh
  * after calling here.
- * The caller of journal_remove_journal_head() *must* run __brelse(bh) at some
+ * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
  * time.  Once the caller has run __brelse(), the buffer is eligible for
  * reaping by try_to_free_buffers().
  */
-void journal_remove_journal_head(struct buffer_head *bh)
+void jbd2_journal_remove_journal_head(struct buffer_head *bh)
 {
        jbd_lock_bh_journal_head(bh);
        __journal_remove_journal_head(bh);
  * Drop a reference on the passed journal_head.  If it fell to zero then try to
  * release the journal_head from the buffer_head.
  */
-void journal_put_journal_head(struct journal_head *jh)
+void jbd2_journal_put_journal_head(struct journal_head *jh)
 {
        struct buffer_head *bh = jh2bh(jh);
 
  * /proc tunables
  */
 #if defined(CONFIG_JBD_DEBUG)
-int journal_enable_debug;
-EXPORT_SYMBOL(journal_enable_debug);
+int jbd2_journal_enable_debug;
+EXPORT_SYMBOL(jbd2_journal_enable_debug);
 #endif
 
 #if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
 {
        int ret;
 
-       ret = sprintf(page + off, "%d\n", journal_enable_debug);
+       ret = sprintf(page + off, "%d\n", jbd2_journal_enable_debug);
        *eof = 1;
        return ret;
 }
        if (copy_from_user(buf, buffer, count))
                return -EFAULT;
        buf[ARRAY_SIZE(buf) - 1] = '\0';
-       journal_enable_debug = simple_strtoul(buf, NULL, 10);
+       jbd2_journal_enable_debug = simple_strtoul(buf, NULL, 10);
        return count;
 }
 
-#define JBD_PROC_NAME "sys/fs/jbd-debug"
+#define JBD_PROC_NAME "sys/fs/jbd2-debug"
 
 static void __init create_jbd_proc_entry(void)
 {
        }
 }
 
-static void __exit remove_jbd_proc_entry(void)
+static void __exit jbd2_remove_jbd_proc_entry(void)
 {
        if (proc_jbd_debug)
                remove_proc_entry(JBD_PROC_NAME, NULL);
 #else
 
 #define create_jbd_proc_entry() do {} while (0)
-#define remove_jbd_proc_entry() do {} while (0)
+#define jbd2_remove_jbd_proc_entry() do {} while (0)
 
 #endif
 
-kmem_cache_t *jbd_handle_cache;
+kmem_cache_t *jbd2_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
-       jbd_handle_cache = kmem_cache_create("journal_handle",
+       jbd2_handle_cache = kmem_cache_create("journal_handle",
                                sizeof(handle_t),
                                0,              /* offset */
                                0,              /* flags */
                                NULL,           /* ctor */
                                NULL);          /* dtor */
-       if (jbd_handle_cache == NULL) {
+       if (jbd2_handle_cache == NULL) {
                printk(KERN_EMERG "JBD: failed to create handle cache\n");
                return -ENOMEM;
        }
        return 0;
 }
 
-static void journal_destroy_handle_cache(void)
+static void jbd2_journal_destroy_handle_cache(void)
 {
-       if (jbd_handle_cache)
-               kmem_cache_destroy(jbd_handle_cache);
+       if (jbd2_handle_cache)
+               kmem_cache_destroy(jbd2_handle_cache);
 }
 
 /*
 {
        int ret;
 
-       ret = journal_init_revoke_caches();
+       ret = jbd2_journal_init_revoke_caches();
        if (ret == 0)
-               ret = journal_init_journal_head_cache();
+               ret = journal_init_jbd2_journal_head_cache();
        if (ret == 0)
                ret = journal_init_handle_cache();
        return ret;
 }
 
-static void journal_destroy_caches(void)
+static void jbd2_journal_destroy_caches(void)
 {
-       journal_destroy_revoke_caches();
-       journal_destroy_journal_head_cache();
-       journal_destroy_handle_cache();
-       journal_destroy_jbd_slabs();
+       jbd2_journal_destroy_revoke_caches();
+       jbd2_journal_destroy_jbd2_journal_head_cache();
+       jbd2_journal_destroy_handle_cache();
+       jbd2_journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
 
        ret = journal_init_caches();
        if (ret != 0)
-               journal_destroy_caches();
+               jbd2_journal_destroy_caches();
        create_jbd_proc_entry();
        return ret;
 }
        if (n)
                printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
 #endif
-       remove_jbd_proc_entry();
-       journal_destroy_caches();
+       jbd2_remove_jbd_proc_entry();
+       jbd2_journal_destroy_caches();
 }
 
 MODULE_LICENSE("GPL");
 
 #else
 #include <linux/time.h>
 #include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #endif
        nbufs = 0;
 
        for (next = start; next < max; next++) {
-               err = journal_bmap(journal, next, &blocknr);
+               err = jbd2_journal_bmap(journal, next, &blocknr);
 
                if (err) {
                        printk (KERN_ERR "JBD: bad block at offset %u\n",
                return -EIO;
        }
 
-       err = journal_bmap(journal, offset, &blocknr);
+       err = jbd2_journal_bmap(journal, offset, &blocknr);
 
        if (err) {
                printk (KERN_ERR "JBD: bad block at offset %u\n",
 
                nr++;
                tagp += sizeof(journal_block_tag_t);
-               if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
+               if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID)))
                        tagp += 16;
 
-               if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
+               if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG))
                        break;
        }
 
 } while (0)
 
 /**
- * journal_recover - recovers a on-disk journal
+ * jbd2_journal_recover - recovers an on-disk journal
  * @journal: the journal to recover
  *
  * The primary function for recovering the log contents when mounting a
  * blocks.  In the third and final pass, we replay any un-revoked blocks
  * in the log.
  */
-int journal_recover(journal_t *journal)
+int jbd2_journal_recover(journal_t *journal)
 {
        int                     err;
        journal_superblock_t *  sb;
         * any existing commit records in the log. */
        journal->j_transaction_sequence = ++info.end_transaction;
 
-       journal_clear_revoke(journal);
+       jbd2_journal_clear_revoke(journal);
        sync_blockdev(journal->j_fs_dev);
        return err;
 }
 
 /**
- * journal_skip_recovery - Start journal and wipe exiting records
+ * jbd2_journal_skip_recovery - Start journal and wipe existing records
  * @journal: journal to startup
  *
  * Locate any valid recovery information from the journal and set up the
  * much recovery information is being erased, and to let us initialise
  * the journal transaction sequence numbers to the next unused ID.
  */
-int journal_skip_recovery(journal_t *journal)
+int jbd2_journal_skip_recovery(journal_t *journal)
 {
        int                     err;
        journal_superblock_t *  sb;
 
                tmp = (journal_header_t *)bh->b_data;
 
-               if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
+               if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) {
                        brelse(bh);
                        break;
                }
                 * to do with it?  That depends on the pass... */
 
                switch(blocktype) {
-               case JFS_DESCRIPTOR_BLOCK:
+               case JBD2_DESCRIPTOR_BLOCK:
                        /* If it is a valid descriptor block, replay it
                         * in pass REPLAY; otherwise, just skip over the
                         * blocks it describes. */
                                        /* If the block has been
                                         * revoked, then we're all done
                                         * here. */
-                                       if (journal_test_revoke
+                                       if (jbd2_journal_test_revoke
                                            (journal, blocknr,
                                             next_commit_ID)) {
                                                brelse(obh);
                                        lock_buffer(nbh);
                                        memcpy(nbh->b_data, obh->b_data,
                                                        journal->j_blocksize);
-                                       if (flags & JFS_FLAG_ESCAPE) {
+                                       if (flags & JBD2_FLAG_ESCAPE) {
                                                *((__be32 *)bh->b_data) =
-                                               cpu_to_be32(JFS_MAGIC_NUMBER);
+                                               cpu_to_be32(JBD2_MAGIC_NUMBER);
                                        }
 
                                        BUFFER_TRACE(nbh, "marking dirty");
 
                        skip_write:
                                tagp += sizeof(journal_block_tag_t);
-                               if (!(flags & JFS_FLAG_SAME_UUID))
+                               if (!(flags & JBD2_FLAG_SAME_UUID))
                                        tagp += 16;
 
-                               if (flags & JFS_FLAG_LAST_TAG)
+                               if (flags & JBD2_FLAG_LAST_TAG)
                                        break;
                        }
 
                        brelse(bh);
                        continue;
 
-               case JFS_COMMIT_BLOCK:
+               case JBD2_COMMIT_BLOCK:
                        /* Found an expected commit block: not much to
                         * do other than move on to the next sequence
                         * number. */
                        next_commit_ID++;
                        continue;
 
-               case JFS_REVOKE_BLOCK:
+               case JBD2_REVOKE_BLOCK:
                        /* If we aren't in the REVOKE pass, then we can
                         * just skip over this block. */
                        if (pass != PASS_REVOKE) {
 static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
                               tid_t sequence, struct recovery_info *info)
 {
-       journal_revoke_header_t *header;
+       jbd2_journal_revoke_header_t *header;
        int offset, max;
 
-       header = (journal_revoke_header_t *) bh->b_data;
-       offset = sizeof(journal_revoke_header_t);
+       header = (jbd2_journal_revoke_header_t *) bh->b_data;
+       offset = sizeof(jbd2_journal_revoke_header_t);
        max = be32_to_cpu(header->r_count);
 
        while (offset < max) {
 
                blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
                offset += 4;
-               err = journal_set_revoke(journal, blocknr, sequence);
+               err = jbd2_journal_set_revoke(journal, blocknr, sequence);
                if (err)
                        return err;
                ++info->nr_revokes;
 
 #else
 #include <linux/time.h>
 #include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/init.h>
 #endif
 
-static kmem_cache_t *revoke_record_cache;
-static kmem_cache_t *revoke_table_cache;
+static kmem_cache_t *jbd2_revoke_record_cache;
+static kmem_cache_t *jbd2_revoke_table_cache;
 
 /* Each revoke record represents one single revoked block.  During
    journal replay, this involves recording the transaction ID of the
    last transaction to revoke this block. */
 
-struct jbd_revoke_record_s
+struct jbd2_revoke_record_s
 {
        struct list_head  hash;
        tid_t             sequence;     /* Used for recovery only */
 
 
 /* The revoke table is just a simple hash table of revoke records. */
-struct jbd_revoke_table_s
+struct jbd2_revoke_table_s
 {
        /* It is conceivable that we might want a larger hash table
         * for recovery.  Must be a power of two. */
 #ifdef __KERNEL__
 static void write_one_revoke_record(journal_t *, transaction_t *,
                                    struct journal_head **, int *,
-                                   struct jbd_revoke_record_s *);
+                                   struct jbd2_revoke_record_s *);
 static void flush_descriptor(journal_t *, struct journal_head *, int);
 #endif
 
 /* Borrowed from buffer.c: this is a tried and tested block hash function */
 static inline int hash(journal_t *journal, unsigned long block)
 {
-       struct jbd_revoke_table_s *table = journal->j_revoke;
+       struct jbd2_revoke_table_s *table = journal->j_revoke;
        int hash_shift = table->hash_shift;
 
        return ((block << (hash_shift - 6)) ^
                              tid_t seq)
 {
        struct list_head *hash_list;
-       struct jbd_revoke_record_s *record;
+       struct jbd2_revoke_record_s *record;
 
 repeat:
-       record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
+       record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
        if (!record)
                goto oom;
 
 
 /* Find a revoke record in the journal's hash table. */
 
-static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
+static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
                                                      unsigned long blocknr)
 {
        struct list_head *hash_list;
-       struct jbd_revoke_record_s *record;
+       struct jbd2_revoke_record_s *record;
 
        hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
 
        spin_lock(&journal->j_revoke_lock);
-       record = (struct jbd_revoke_record_s *) hash_list->next;
+       record = (struct jbd2_revoke_record_s *) hash_list->next;
        while (&(record->hash) != hash_list) {
                if (record->blocknr == blocknr) {
                        spin_unlock(&journal->j_revoke_lock);
                        return record;
                }
-               record = (struct jbd_revoke_record_s *) record->hash.next;
+               record = (struct jbd2_revoke_record_s *) record->hash.next;
        }
        spin_unlock(&journal->j_revoke_lock);
        return NULL;
 }
 
-int __init journal_init_revoke_caches(void)
+int __init jbd2_journal_init_revoke_caches(void)
 {
-       revoke_record_cache = kmem_cache_create("revoke_record",
-                                          sizeof(struct jbd_revoke_record_s),
+       jbd2_revoke_record_cache = kmem_cache_create("revoke_record",
+                                          sizeof(struct jbd2_revoke_record_s),
                                           0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-       if (revoke_record_cache == 0)
+       if (jbd2_revoke_record_cache == 0)
                return -ENOMEM;
 
-       revoke_table_cache = kmem_cache_create("revoke_table",
-                                          sizeof(struct jbd_revoke_table_s),
+       jbd2_revoke_table_cache = kmem_cache_create("revoke_table",
+                                          sizeof(struct jbd2_revoke_table_s),
                                           0, 0, NULL, NULL);
-       if (revoke_table_cache == 0) {
-               kmem_cache_destroy(revoke_record_cache);
-               revoke_record_cache = NULL;
+       if (jbd2_revoke_table_cache == 0) {
+               kmem_cache_destroy(jbd2_revoke_record_cache);
+               jbd2_revoke_record_cache = NULL;
                return -ENOMEM;
        }
        return 0;
 }
 
-void journal_destroy_revoke_caches(void)
+void jbd2_journal_destroy_revoke_caches(void)
 {
-       kmem_cache_destroy(revoke_record_cache);
-       revoke_record_cache = NULL;
-       kmem_cache_destroy(revoke_table_cache);
-       revoke_table_cache = NULL;
+       kmem_cache_destroy(jbd2_revoke_record_cache);
+       jbd2_revoke_record_cache = NULL;
+       kmem_cache_destroy(jbd2_revoke_table_cache);
+       jbd2_revoke_table_cache = NULL;
 }
 
 /* Initialise the revoke table for a given journal to a given size. */
 
-int journal_init_revoke(journal_t *journal, int hash_size)
+int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
 {
        int shift, tmp;
 
        while((tmp >>= 1UL) != 0UL)
                shift++;
 
-       journal->j_revoke_table[0] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
+       journal->j_revoke_table[0] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
        if (!journal->j_revoke_table[0])
                return -ENOMEM;
        journal->j_revoke = journal->j_revoke_table[0];
        journal->j_revoke->hash_table =
                kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
        if (!journal->j_revoke->hash_table) {
-               kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
+               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
                journal->j_revoke = NULL;
                return -ENOMEM;
        }
        for (tmp = 0; tmp < hash_size; tmp++)
                INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
 
-       journal->j_revoke_table[1] = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
+       journal->j_revoke_table[1] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
        if (!journal->j_revoke_table[1]) {
                kfree(journal->j_revoke_table[0]->hash_table);
-               kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
+               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
                return -ENOMEM;
        }
 
                kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
        if (!journal->j_revoke->hash_table) {
                kfree(journal->j_revoke_table[0]->hash_table);
-               kmem_cache_free(revoke_table_cache, journal->j_revoke_table[0]);
-               kmem_cache_free(revoke_table_cache, journal->j_revoke_table[1]);
+               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
+               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[1]);
                journal->j_revoke = NULL;
                return -ENOMEM;
        }
 
 /* Destoy a journal's revoke table.  The table must already be empty! */
 
-void journal_destroy_revoke(journal_t *journal)
+void jbd2_journal_destroy_revoke(journal_t *journal)
 {
-       struct jbd_revoke_table_s *table;
+       struct jbd2_revoke_table_s *table;
        struct list_head *hash_list;
        int i;
 
        }
 
        kfree(table->hash_table);
-       kmem_cache_free(revoke_table_cache, table);
+       kmem_cache_free(jbd2_revoke_table_cache, table);
        journal->j_revoke = NULL;
 
        table = journal->j_revoke_table[1];
        }
 
        kfree(table->hash_table);
-       kmem_cache_free(revoke_table_cache, table);
+       kmem_cache_free(jbd2_revoke_table_cache, table);
        journal->j_revoke = NULL;
 }
 
 #ifdef __KERNEL__
 
 /*
- * journal_revoke: revoke a given buffer_head from the journal.  This
+ * jbd2_journal_revoke: revoke a given buffer_head from the journal.  This
  * prevents the block from being replayed during recovery if we take a
  * crash after this current transaction commits.  Any subsequent
  * metadata writes of the buffer in this transaction cancel the
  * revoke before clearing the block bitmap when we are deleting
  * metadata.
  *
- * Revoke performs a journal_forget on any buffer_head passed in as a
+ * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
  * parameter, but does _not_ forget the buffer_head if the bh was only
  * found implicitly.
  *
  * bh_in may not be a journalled buffer - it may have come off
  * the hash tables without an attached journal_head.
  *
- * If bh_in is non-zero, journal_revoke() will decrement its b_count
+ * If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count
  * by one.
  */
 
-int journal_revoke(handle_t *handle, unsigned long blocknr,
+int jbd2_journal_revoke(handle_t *handle, unsigned long blocknr,
                   struct buffer_head *bh_in)
 {
        struct buffer_head *bh = NULL;
                BUFFER_TRACE(bh_in, "enter");
 
        journal = handle->h_transaction->t_journal;
-       if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
+       if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){
                J_ASSERT (!"Cannot set revoke feature!");
                return -EINVAL;
        }
                set_buffer_revoked(bh);
                set_buffer_revokevalid(bh);
                if (bh_in) {
-                       BUFFER_TRACE(bh_in, "call journal_forget");
-                       journal_forget(handle, bh_in);
+                       BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
+                       jbd2_journal_forget(handle, bh_in);
                } else {
                        BUFFER_TRACE(bh, "call brelse");
                        __brelse(bh);
 
 /*
  * Cancel an outstanding revoke.  For use only internally by the
- * journaling code (called from journal_get_write_access).
+ * journaling code (called from jbd2_journal_get_write_access).
  *
  * We trust buffer_revoked() on the buffer if the buffer is already
  * being journaled: if there is no revoke pending on the buffer, then we
  *
  * The caller must have the journal locked.
  */
-int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
+int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
 {
-       struct jbd_revoke_record_s *record;
+       struct jbd2_revoke_record_s *record;
        journal_t *journal = handle->h_transaction->t_journal;
        int need_cancel;
        int did_revoke = 0;     /* akpm: debug */
                        spin_lock(&journal->j_revoke_lock);
                        list_del(&record->hash);
                        spin_unlock(&journal->j_revoke_lock);
-                       kmem_cache_free(revoke_record_cache, record);
+                       kmem_cache_free(jbd2_revoke_record_cache, record);
                        did_revoke = 1;
                }
        }
  * we do not want to suspend any processing until all revokes are
  * written -bzzz
  */
-void journal_switch_revoke_table(journal_t *journal)
+void jbd2_journal_switch_revoke_table(journal_t *journal)
 {
        int i;
 
  * Called with the journal lock held.
  */
 
-void journal_write_revoke_records(journal_t *journal,
+void jbd2_journal_write_revoke_records(journal_t *journal,
                                  transaction_t *transaction)
 {
        struct journal_head *descriptor;
-       struct jbd_revoke_record_s *record;
-       struct jbd_revoke_table_s *revoke;
+       struct jbd2_revoke_record_s *record;
+       struct jbd2_revoke_table_s *revoke;
        struct list_head *hash_list;
        int i, offset, count;
 
                hash_list = &revoke->hash_table[i];
 
                while (!list_empty(hash_list)) {
-                       record = (struct jbd_revoke_record_s *)
+                       record = (struct jbd2_revoke_record_s *)
                                hash_list->next;
                        write_one_revoke_record(journal, transaction,
                                                &descriptor, &offset,
                                                record);
                        count++;
                        list_del(&record->hash);
-                       kmem_cache_free(revoke_record_cache, record);
+                       kmem_cache_free(jbd2_revoke_record_cache, record);
                }
        }
        if (descriptor)
                                    transaction_t *transaction,
                                    struct journal_head **descriptorp,
                                    int *offsetp,
-                                   struct jbd_revoke_record_s *record)
+                                   struct jbd2_revoke_record_s *record)
 {
        struct journal_head *descriptor;
        int offset;
 
        /* If we are already aborting, this all becomes a noop.  We
            still need to go round the loop in
-           journal_write_revoke_records in order to free all of the
+           jbd2_journal_write_revoke_records in order to free all of the
            revoke records: only the IO to the journal is omitted. */
        if (is_journal_aborted(journal))
                return;
        }
 
        if (!descriptor) {
-               descriptor = journal_get_descriptor_buffer(journal);
+               descriptor = jbd2_journal_get_descriptor_buffer(journal);
                if (!descriptor)
                        return;
                header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
-               header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-               header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
+               header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
+               header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
                header->h_sequence  = cpu_to_be32(transaction->t_tid);
 
                /* Record it so that we can wait for IO completion later */
                JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
-               journal_file_buffer(descriptor, transaction, BJ_LogCtl);
+               jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
 
-               offset = sizeof(journal_revoke_header_t);
+               offset = sizeof(jbd2_journal_revoke_header_t);
                *descriptorp = descriptor;
        }
 
                             struct journal_head *descriptor,
                             int offset)
 {
-       journal_revoke_header_t *header;
+       jbd2_journal_revoke_header_t *header;
        struct buffer_head *bh = jh2bh(descriptor);
 
        if (is_journal_aborted(journal)) {
                return;
        }
 
-       header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
+       header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
        header->r_count = cpu_to_be32(offset);
        set_buffer_jwrite(bh);
        BUFFER_TRACE(bh, "write");
  * single block.
  */
 
-int journal_set_revoke(journal_t *journal,
+int jbd2_journal_set_revoke(journal_t *journal,
                       unsigned long blocknr,
                       tid_t sequence)
 {
-       struct jbd_revoke_record_s *record;
+       struct jbd2_revoke_record_s *record;
 
        record = find_revoke_record(journal, blocknr);
        if (record) {
  * ones, but later transactions still need replayed.
  */
 
-int journal_test_revoke(journal_t *journal,
+int jbd2_journal_test_revoke(journal_t *journal,
                        unsigned long blocknr,
                        tid_t sequence)
 {
-       struct jbd_revoke_record_s *record;
+       struct jbd2_revoke_record_s *record;
 
        record = find_revoke_record(journal, blocknr);
        if (!record)
  * that it can be reused by the running filesystem.
  */
 
-void journal_clear_revoke(journal_t *journal)
+void jbd2_journal_clear_revoke(journal_t *journal)
 {
        int i;
        struct list_head *hash_list;
-       struct jbd_revoke_record_s *record;
-       struct jbd_revoke_table_s *revoke;
+       struct jbd2_revoke_record_s *record;
+       struct jbd2_revoke_table_s *revoke;
 
        revoke = journal->j_revoke;
 
        for (i = 0; i < revoke->hash_size; i++) {
                hash_list = &revoke->hash_table[i];
                while (!list_empty(hash_list)) {
-                       record = (struct jbd_revoke_record_s*) hash_list->next;
+                       record = (struct jbd2_revoke_record_s*) hash_list->next;
                        list_del(&record->hash);
-                       kmem_cache_free(revoke_record_cache, record);
+                       kmem_cache_free(jbd2_revoke_record_cache, record);
                }
        }
 }
 
 
 #include <linux/time.h>
 #include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <linux/highmem.h>
 
 /*
- * get_transaction: obtain a new transaction_t object.
+ * jbd2_get_transaction: obtain a new transaction_t object.
  *
  * Simply allocate and initialise a new transaction.  Create it in
  * RUNNING state and add it to the current journal (which should not
  */
 
 static transaction_t *
-get_transaction(journal_t *journal, transaction_t *transaction)
+jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
 {
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        spin_lock(&journal->j_state_lock);
 repeat_locked:
        if (is_journal_aborted(journal) ||
-           (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
+           (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                spin_unlock(&journal->j_state_lock);
                ret = -EROFS;
                goto out;
                        spin_unlock(&journal->j_state_lock);
                        goto alloc_transaction;
                }
-               get_transaction(journal, new_transaction);
+               jbd2_get_transaction(journal, new_transaction);
                new_transaction = NULL;
        }
 
                spin_unlock(&transaction->t_handle_lock);
                prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                                TASK_UNINTERRUPTIBLE);
-               __log_start_commit(journal, transaction->t_tid);
+               __jbd2_log_start_commit(journal, transaction->t_tid);
                spin_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
         * committing_transaction->t_outstanding_credits plus "enough" for
         * the log control blocks.
         * Also, this test is inconsitent with the matching one in
-        * journal_extend().
+        * jbd2_journal_extend().
         */
-       if (__log_space_left(journal) < jbd_space_needed(journal)) {
+       if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
                jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
                spin_unlock(&transaction->t_handle_lock);
-               __log_wait_for_space(journal);
+               __jbd2_log_wait_for_space(journal);
                goto repeat_locked;
        }
 
        transaction->t_handle_count++;
        jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
                  handle, nblocks, transaction->t_outstanding_credits,
-                 __log_space_left(journal));
+                 __jbd2_log_space_left(journal));
        spin_unlock(&transaction->t_handle_lock);
        spin_unlock(&journal->j_state_lock);
 out:
 }
 
 /**
- * handle_t *journal_start() - Obtain a new handle.
+ * handle_t *jbd2_journal_start() - Obtain a new handle.
  * @journal: Journal to start transaction on.
  * @nblocks: number of block buffer we might modify
  *
  *
  * Return a pointer to a newly allocated handle, or NULL on failure
  */
-handle_t *journal_start(journal_t *journal, int nblocks)
+handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 {
        handle_t *handle = journal_current_handle();
        int err;
 }
 
 /**
- * int journal_extend() - extend buffer credits.
+ * int jbd2_journal_extend() - extend buffer credits.
  * @handle:  handle to 'extend'
  * @nblocks: nr blocks to try to extend by.
  *
  * a credit for a number of buffer modications in advance, but can
  * extend its credit if it needs more.
  *
- * journal_extend tries to give the running handle more buffer credits.
+ * jbd2_journal_extend tries to give the running handle more buffer credits.
  * It does not guarantee that allocation - this is a best-effort only.
  * The calling process MUST be able to deal cleanly with a failure to
  * extend here.
  * return code < 0 implies an error
  * return code > 0 implies normal transaction-full status.
  */
-int journal_extend(handle_t *handle, int nblocks)
+int jbd2_journal_extend(handle_t *handle, int nblocks)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
                goto unlock;
        }
 
-       if (wanted > __log_space_left(journal)) {
+       if (wanted > __jbd2_log_space_left(journal)) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "insufficient log space\n", handle, nblocks);
                goto unlock;
 
 
 /**
- * int journal_restart() - restart a handle .
+ * int jbd2_journal_restart() - restart a handle.
  * @handle:  handle to restart
  * @nblocks: nr credits requested
  *
  * Restart a handle for a multi-transaction filesystem
  * operation.
  *
- * If the journal_extend() call above fails to grant new buffer credits
- * to a running handle, a call to journal_restart will commit the
+ * If the jbd2_journal_extend() call above fails to grant new buffer credits
+ * to a running handle, a call to jbd2_journal_restart will commit the
  * handle's transaction so far and reattach the handle to a new
  * transaction capabable of guaranteeing the requested number of
  * credits.
  */
 
-int journal_restart(handle_t *handle, int nblocks)
+int jbd2_journal_restart(handle_t *handle, int nblocks)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        spin_unlock(&transaction->t_handle_lock);
 
        jbd_debug(2, "restarting handle %p\n", handle);
-       __log_start_commit(journal, transaction->t_tid);
+       __jbd2_log_start_commit(journal, transaction->t_tid);
        spin_unlock(&journal->j_state_lock);
 
        handle->h_buffer_credits = nblocks;
 
 
 /**
- * void journal_lock_updates () - establish a transaction barrier.
+ * void jbd2_journal_lock_updates() - establish a transaction barrier.
  * @journal:  Journal to establish a barrier on.
  *
  * This locks out any further updates from being started, and blocks
  *
  * The journal lock should not be held on entry.
  */
-void journal_lock_updates(journal_t *journal)
+void jbd2_journal_lock_updates(journal_t *journal)
 {
        DEFINE_WAIT(wait);
 
 
        /*
         * We have now established a barrier against other normal updates, but
-        * we also need to barrier against other journal_lock_updates() calls
+        * we also need to barrier against other
+        * jbd2_journal_lock_updates() calls to make sure that we serialise
         * to make sure that we serialise special journal-locked operations
         * too.
         */
 }
 
 /**
- * void journal_unlock_updates (journal_t* journal) - release barrier
+ * void jbd2_journal_unlock_updates() - release barrier
  * @journal:  Journal to release the barrier on.
  *
- * Release a transaction barrier obtained with journal_lock_updates().
+ * Release a transaction barrier obtained with jbd2_journal_lock_updates().
  *
  * Should be called without the journal lock held.
  */
-void journal_unlock_updates (journal_t *journal)
+void jbd2_journal_unlock_updates (journal_t *journal)
 {
        J_ASSERT(journal->j_barrier_count != 0);
 
                                JBUFFER_TRACE(jh, "allocate memory for buffer");
                                jbd_unlock_bh_state(bh);
                                frozen_buffer =
-                                       jbd_slab_alloc(jh2bh(jh)->b_size,
+                                       jbd2_slab_alloc(jh2bh(jh)->b_size,
                                                         GFP_NOFS);
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                jh->b_transaction = transaction;
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
-               __journal_file_buffer(jh, transaction, BJ_Reserved);
+               __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        }
 
         * If we are about to journal a buffer, then any revoke pending on it is
         * no longer valid
         */
-       journal_cancel_revoke(handle, jh);
+       jbd2_journal_cancel_revoke(handle, jh);
 
 out:
        if (unlikely(frozen_buffer))    /* It's usually NULL */
-               jbd_slab_free(frozen_buffer, bh->b_size);
+               jbd2_slab_free(frozen_buffer, bh->b_size);
 
        JBUFFER_TRACE(jh, "exit");
        return error;
 }
 
 /**
- * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
+ * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
  * @handle: transaction to add buffer modifications to
  * @bh:     bh to be used for metadata writes
  * @credits: variable that will receive credits for the buffer
  * because we're write()ing a buffer which is also part of a shared mapping.
  */
 
-int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
 {
-       struct journal_head *jh = journal_add_journal_head(bh);
+       struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int rc;
 
        /* We do not want to get caught playing with fields which the
         * log thread also manipulates.  Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
-       journal_put_journal_head(jh);
+       jbd2_journal_put_journal_head(jh);
        return rc;
 }
 
  * unlocked buffer beforehand. */
 
 /**
- * int journal_get_create_access () - notify intent to use newly created bh
+ * int jbd2_journal_get_create_access () - notify intent to use newly created bh
  * @handle: transaction to new buffer to
  * @bh: new buffer.
  *
  * Call this if you create a new bh.
  */
-int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
-       struct journal_head *jh = journal_add_journal_head(bh);
+       struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;
 
        jbd_debug(5, "journal_head %p\n", jh);
        if (jh->b_transaction == NULL) {
                jh->b_transaction = transaction;
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
-               __journal_file_buffer(jh, transaction, BJ_Reserved);
+               __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                JBUFFER_TRACE(jh, "set next transaction");
                jh->b_next_transaction = transaction;
         * which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
-       journal_cancel_revoke(handle, jh);
-       journal_put_journal_head(jh);
+       jbd2_journal_cancel_revoke(handle, jh);
+       jbd2_journal_put_journal_head(jh);
 out:
        return err;
 }
 
 /**
- * int journal_get_undo_access() -  Notify intent to modify metadata with
+ * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
  *     non-rewindable consequences
  * @handle: transaction
  * @bh: buffer to undo
  * since if we overwrote that space we would make the delete
  * un-rewindable in case of a crash.
  *
- * To deal with that, journal_get_undo_access requests write access to a
+ * To deal with that, jbd2_journal_get_undo_access requests write access to a
  * buffer for parts of non-rewindable operations such as delete
  * operations on the bitmaps.  The journaling code must keep a copy of
  * the buffer's contents prior to the undo_access call until such time
  *
  * Returns error number or 0 on success.
  */
-int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 {
        int err;
-       struct journal_head *jh = journal_add_journal_head(bh);
+       struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        char *committed_data = NULL;
 
        JBUFFER_TRACE(jh, "entry");
 
 repeat:
        if (!jh->b_committed_data) {
-               committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+               committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
                                __FUNCTION__);
        }
        jbd_unlock_bh_state(bh);
 out:
-       journal_put_journal_head(jh);
+       jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
-               jbd_slab_free(committed_data, bh->b_size);
+               jbd2_slab_free(committed_data, bh->b_size);
        return err;
 }
 
 /**
- * int journal_dirty_data() -  mark a buffer as containing dirty data which
+ * int jbd2_journal_dirty_data() - mark a buffer as containing dirty data which
  *                             needs to be flushed before we can commit the
  *                             current transaction.
  * @handle: transaction
  *
  * Returns error number or 0 on success.
  *
- * journal_dirty_data() can be called via page_launder->ext3_writepage
+ * jbd2_journal_dirty_data() can be called via page_launder->ext4_writepage
  * by kswapd.
  */
-int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 {
        journal_t *journal = handle->h_transaction->t_journal;
        int need_brelse = 0;
        if (is_handle_aborted(handle))
                return 0;
 
-       jh = journal_add_journal_head(bh);
+       jh = jbd2_journal_add_journal_head(bh);
        JBUFFER_TRACE(jh, "entry");
 
        /*
                         * And while we're in that state, someone does a
                         * writepage() in an attempt to pageout the same area
                         * of the file via a shared mapping.  At present that
-                        * calls journal_dirty_data(), and we get right here.
+                        * calls jbd2_journal_dirty_data(), and we get right here.
                         * It may be too late to journal the data.  Simply
                         * falling through to the next test will suffice: the
                         * data will be dirty and wil be checkpointed.  The
                        /* journal_clean_data_list() may have got there first */
                        if (jh->b_transaction != NULL) {
                                JBUFFER_TRACE(jh, "unfile from commit");
-                               __journal_temp_unlink_buffer(jh);
+                               __jbd2_journal_temp_unlink_buffer(jh);
                                /* It still points to the committing
                                 * transaction; move it to this one so
                                 * that the refile assert checks are
                if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
                        JBUFFER_TRACE(jh, "not on correct data list: unfile");
                        J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
-                       __journal_temp_unlink_buffer(jh);
+                       __jbd2_journal_temp_unlink_buffer(jh);
                        jh->b_transaction = handle->h_transaction;
                        JBUFFER_TRACE(jh, "file as data");
-                       __journal_file_buffer(jh, handle->h_transaction,
+                       __jbd2_journal_file_buffer(jh, handle->h_transaction,
                                                BJ_SyncData);
                }
        } else {
                JBUFFER_TRACE(jh, "not on a transaction");
-               __journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
+               __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
        }
 no_journal:
        spin_unlock(&journal->j_list_lock);
                __brelse(bh);
        }
        JBUFFER_TRACE(jh, "exit");
-       journal_put_journal_head(jh);
+       jbd2_journal_put_journal_head(jh);
        return 0;
 }
 
 /**
- * int journal_dirty_metadata() -  mark a buffer as containing dirty metadata
+ * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
  * @handle: transaction to add buffer to.
  * @bh: buffer to mark
  *
  * buffer: that only gets done when the old transaction finally
  * completes its commit.
  */
-int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
 
        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
-       __journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
+       __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
 out_unlock_bh:
        jbd_unlock_bh_state(bh);
 }
 
 /*
- * journal_release_buffer: undo a get_write_access without any buffer
+ * jbd2_journal_release_buffer: undo a get_write_access without any buffer
  * updates, if the update decided in the end that it didn't need access.
  *
  */
 void
-journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
 {
        BUFFER_TRACE(bh, "entry");
 }
 
 /**
- * void journal_forget() - bforget() for potentially-journaled buffers.
+ * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
  * @handle: transaction handle
  * @bh:     bh to 'forget'
  *
  * Allow this call even if the handle has aborted --- it may be part of
  * the caller's cleanup after an abort.
  */
-int journal_forget (handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
                 */
 
                if (jh->b_cp_transaction) {
-                       __journal_temp_unlink_buffer(jh);
-                       __journal_file_buffer(jh, transaction, BJ_Forget);
+                       __jbd2_journal_temp_unlink_buffer(jh);
+                       __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
-                       __journal_unfile_buffer(jh);
-                       journal_remove_journal_head(bh);
+                       __jbd2_journal_unfile_buffer(jh);
+                       jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                        if (!buffer_jbd(bh)) {
                                spin_unlock(&journal->j_list_lock);
 }
 
 /**
- * int journal_stop() - complete a transaction
+ * int jbd2_journal_stop() - complete a transaction
  * @handle: tranaction to complete.
  *
  * All done for a particular handle.
  * complication is that we need to start a commit operation if the
  * filesystem is marked for synchronous update.
  *
- * journal_stop itself will not usually return an error, but it may
+ * jbd2_journal_stop itself will not usually return an error, but it may
  * do so in unusual circumstances.  In particular, expect it to
- * return -EIO if a journal_abort has been executed since the
+ * return -EIO if a jbd2_journal_abort has been executed since the
  * transaction began.
  */
-int journal_stop(handle_t *handle)
+int jbd2_journal_stop(handle_t *handle)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
                jbd_debug(2, "transaction too old, requesting commit for "
                                        "handle %p\n", handle);
                /* This is non-blocking */
-               __log_start_commit(journal, transaction->t_tid);
+               __jbd2_log_start_commit(journal, transaction->t_tid);
                spin_unlock(&journal->j_state_lock);
 
                /*
-                * Special case: JFS_SYNC synchronous updates require us
+                * Special case: JBD2_SYNC synchronous updates require us
                 * to wait for the commit to complete.
                 */
                if (handle->h_sync && !(current->flags & PF_MEMALLOC))
-                       err = log_wait_commit(journal, tid);
+                       err = jbd2_log_wait_commit(journal, tid);
        } else {
                spin_unlock(&transaction->t_handle_lock);
                spin_unlock(&journal->j_state_lock);
        return err;
 }
 
-/**int journal_force_commit() - force any uncommitted transactions
+/**
+ * int jbd2_journal_force_commit() - force any uncommitted transactions
  * @journal: journal to force
  *
  * For synchronous operations: force any uncommitted transactions
  * to disk.  May seem kludgy, but it reuses all the handle batching
  * code in a very simple manner.
  */
-int journal_force_commit(journal_t *journal)
+int jbd2_journal_force_commit(journal_t *journal)
 {
        handle_t *handle;
        int ret;
 
-       handle = journal_start(journal, 1);
+       handle = jbd2_journal_start(journal, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
        } else {
                handle->h_sync = 1;
-               ret = journal_stop(handle);
+               ret = jbd2_journal_stop(handle);
        }
        return ret;
 }
  *
  * Called under j_list_lock.  The journal may not be locked.
  */
-void __journal_temp_unlink_buffer(struct journal_head *jh)
+void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
 {
        struct journal_head **list = NULL;
        transaction_t *transaction;
                mark_buffer_dirty(bh);  /* Expose it to the VM */
 }
 
-void __journal_unfile_buffer(struct journal_head *jh)
+void __jbd2_journal_unfile_buffer(struct journal_head *jh)
 {
-       __journal_temp_unlink_buffer(jh);
+       __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = NULL;
 }
 
-void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
+void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
 {
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&journal->j_list_lock);
-       __journal_unfile_buffer(jh);
+       __jbd2_journal_unfile_buffer(jh);
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
 }
 
 /*
- * Called from journal_try_to_free_buffers().
+ * Called from jbd2_journal_try_to_free_buffers().
  *
  * Called under jbd_lock_bh_state(bh)
  */
                if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
                        /* A written-back ordered data buffer */
                        JBUFFER_TRACE(jh, "release data");
-                       __journal_unfile_buffer(jh);
-                       journal_remove_journal_head(bh);
+                       __jbd2_journal_unfile_buffer(jh);
+                       jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                }
        } else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
                /* written-back checkpointed metadata buffer */
                if (jh->b_jlist == BJ_None) {
                        JBUFFER_TRACE(jh, "remove from checkpoint list");
-                       __journal_remove_checkpoint(jh);
-                       journal_remove_journal_head(bh);
+                       __jbd2_journal_remove_checkpoint(jh);
+                       jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                }
        }
 
 
 /**
- * int journal_try_to_free_buffers() - try to free page buffers.
+ * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
  * @journal: journal for operation
  * @page: to try and free
  * @unused_gfp_mask: unused
  *
  * This complicates JBD locking somewhat.  We aren't protected by the
  * BKL here.  We wish to remove the buffer from its committing or
- * running transaction's ->t_datalist via __journal_unfile_buffer.
+ * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
  *
  * This may *change* the value of transaction_t->t_datalist, so anyone
  * who looks at t_datalist needs to lock against this function.
  *
- * Even worse, someone may be doing a journal_dirty_data on this
- * buffer.  So we need to lock against that.  journal_dirty_data()
+ * Even worse, someone may be doing a jbd2_journal_dirty_data on this
+ * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
  * will come out of the lock with the buffer dirty, which makes it
  * ineligible for release here.
  *
  * cannot happen because we never reallocate freed data as metadata
  * while the data is part of a transaction.  Yes?
  */
-int journal_try_to_free_buffers(journal_t *journal,
+int jbd2_journal_try_to_free_buffers(journal_t *journal,
                                struct page *page, gfp_t unused_gfp_mask)
 {
        struct buffer_head *head;
                /*
                 * We take our own ref against the journal_head here to avoid
                 * having to add tons of locking around each instance of
-                * journal_remove_journal_head() and journal_put_journal_head().
+                * jbd2_journal_remove_journal_head() and
+                * jbd2_journal_put_journal_head().
                 */
-               jh = journal_grab_journal_head(bh);
+               jh = jbd2_journal_grab_journal_head(bh);
                if (!jh)
                        continue;
 
                jbd_lock_bh_state(bh);
                __journal_try_to_free_buffer(journal, bh);
-               journal_put_journal_head(jh);
+               jbd2_journal_put_journal_head(jh);
                jbd_unlock_bh_state(bh);
                if (buffer_jbd(bh))
                        goto busy;
        int may_free = 1;
        struct buffer_head *bh = jh2bh(jh);
 
-       __journal_unfile_buffer(jh);
+       __jbd2_journal_unfile_buffer(jh);
 
        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
-               __journal_file_buffer(jh, transaction, BJ_Forget);
+               __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                clear_buffer_jbddirty(bh);
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
-               journal_remove_journal_head(bh);
+               jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
        }
        return may_free;
 }
 
 /*
- * journal_invalidatepage
+ * jbd2_journal_invalidatepage
  *
  * This code is tricky.  It has a number of cases to deal with.
  *
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
 
-       jh = journal_grab_journal_head(bh);
+       jh = jbd2_journal_grab_journal_head(bh);
        if (!jh)
                goto zap_buffer_no_jh;
 
                        JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
                        ret = __dispose_buffer(jh,
                                        journal->j_running_transaction);
-                       journal_put_journal_head(jh);
+                       jbd2_journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        spin_unlock(&journal->j_state_lock);
                                JBUFFER_TRACE(jh, "give to committing trans");
                                ret = __dispose_buffer(jh,
                                        journal->j_committing_transaction);
-                               journal_put_journal_head(jh);
+                               jbd2_journal_put_journal_head(jh);
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                spin_unlock(&journal->j_state_lock);
                                        journal->j_running_transaction);
                        jh->b_next_transaction = NULL;
                }
-               journal_put_journal_head(jh);
+               jbd2_journal_put_journal_head(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                spin_unlock(&journal->j_state_lock);
        }
 
 zap_buffer:
-       journal_put_journal_head(jh);
+       jbd2_journal_put_journal_head(jh);
 zap_buffer_no_jh:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
 }
 
 /**
- * void journal_invalidatepage()
+ * void jbd2_journal_invalidatepage()
  * @journal: journal to use for flush...
  * @page:    page to flush
  * @offset:  length of page to invalidate.
  * Reap page buffers containing data after offset in page.
  *
  */
-void journal_invalidatepage(journal_t *journal,
+void jbd2_journal_invalidatepage(journal_t *journal,
                      struct page *page,
                      unsigned long offset)
 {
 /*
  * File a buffer on the given transaction list.
  */
-void __journal_file_buffer(struct journal_head *jh,
+void __jbd2_journal_file_buffer(struct journal_head *jh,
                        transaction_t *transaction, int jlist)
 {
        struct journal_head **list = NULL;
        }
 
        if (jh->b_transaction)
-               __journal_temp_unlink_buffer(jh);
+               __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = transaction;
 
        switch (jlist) {
                set_buffer_jbddirty(bh);
 }
 
-void journal_file_buffer(struct journal_head *jh,
+void jbd2_journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
 {
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&transaction->t_journal->j_list_lock);
-       __journal_file_buffer(jh, transaction, jlist);
+       __jbd2_journal_file_buffer(jh, transaction, jlist);
        spin_unlock(&transaction->t_journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
 }
  *
  * Called under jbd_lock_bh_state(jh2bh(jh))
  */
-void __journal_refile_buffer(struct journal_head *jh)
+void __jbd2_journal_refile_buffer(struct journal_head *jh)
 {
        int was_dirty;
        struct buffer_head *bh = jh2bh(jh);
 
        /* If the buffer is now unused, just drop it. */
        if (jh->b_next_transaction == NULL) {
-               __journal_unfile_buffer(jh);
+               __jbd2_journal_unfile_buffer(jh);
                return;
        }
 
         */
 
        was_dirty = test_clear_buffer_jbddirty(bh);
-       __journal_temp_unlink_buffer(jh);
+       __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
-       __journal_file_buffer(jh, jh->b_transaction,
+       __jbd2_journal_file_buffer(jh, jh->b_transaction,
                                was_dirty ? BJ_Metadata : BJ_Reserved);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
 
  * For the unlocked version of this call, also make sure that any
  * hanging journal_head is cleaned up if necessary.
  *
- * __journal_refile_buffer is usually called as part of a single locked
+ * __jbd2_journal_refile_buffer is usually called as part of a single locked
  * operation on a buffer_head, in which the caller is probably going to
  * be hooking the journal_head onto other lists.  In that case it is up
  * to the caller to remove the journal_head if necessary.  For the
- * unlocked journal_refile_buffer call, the caller isn't going to be
+ * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
  * doing anything else to the buffer so we need to do the cleanup
  * ourselves to avoid a jh leak.
  *
  * *** The journal_head may be freed by this call! ***
  */
-void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
+void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
 {
        struct buffer_head *bh = jh2bh(jh);
 
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
 
-       __journal_refile_buffer(jh);
+       __jbd2_journal_refile_buffer(jh);
        jbd_unlock_bh_state(bh);
-       journal_remove_journal_head(bh);
+       jbd2_journal_remove_journal_head(bh);
 
        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
 
 /*
- * linux/include/linux/ext4_jbd.h
+ * linux/include/linux/ext4_jbd2.h
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
  *
 #define _LINUX_EXT4_JBD_H
 
 #include <linux/fs.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
 #include <linux/ext4_fs.h>
 
 #define EXT4_JOURNAL(inode)    (EXT4_SB((inode)->i_sb)->s_journal)
 __ext4_journal_get_undo_access(const char *where, handle_t *handle,
                                struct buffer_head *bh)
 {
-       int err = journal_get_undo_access(handle, bh);
+       int err = jbd2_journal_get_undo_access(handle, bh);
        if (err)
                ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
        return err;
 __ext4_journal_get_write_access(const char *where, handle_t *handle,
                                struct buffer_head *bh)
 {
-       int err = journal_get_write_access(handle, bh);
+       int err = jbd2_journal_get_write_access(handle, bh);
        if (err)
                ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
        return err;
 static inline void
 ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
 {
-       journal_release_buffer(handle, bh);
+       jbd2_journal_release_buffer(handle, bh);
 }
 
 static inline int
 __ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
 {
-       int err = journal_forget(handle, bh);
+       int err = jbd2_journal_forget(handle, bh);
        if (err)
                ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
        return err;
 __ext4_journal_revoke(const char *where, handle_t *handle,
                      unsigned long blocknr, struct buffer_head *bh)
 {
-       int err = journal_revoke(handle, blocknr, bh);
+       int err = jbd2_journal_revoke(handle, blocknr, bh);
        if (err)
                ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
        return err;
 __ext4_journal_get_create_access(const char *where,
                                 handle_t *handle, struct buffer_head *bh)
 {
-       int err = journal_get_create_access(handle, bh);
+       int err = jbd2_journal_get_create_access(handle, bh);
        if (err)
                ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
        return err;
 __ext4_journal_dirty_metadata(const char *where,
                              handle_t *handle, struct buffer_head *bh)
 {
-       int err = journal_dirty_metadata(handle, bh);
+       int err = jbd2_journal_dirty_metadata(handle, bh);
        if (err)
                ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
        return err;
 
 static inline int ext4_journal_extend(handle_t *handle, int nblocks)
 {
-       return journal_extend(handle, nblocks);
+       return jbd2_journal_extend(handle, nblocks);
 }
 
 static inline int ext4_journal_restart(handle_t *handle, int nblocks)
 {
-       return journal_restart(handle, nblocks);
+       return jbd2_journal_restart(handle, nblocks);
 }
 
 static inline int ext4_journal_blocks_per_page(struct inode *inode)
 {
-       return journal_blocks_per_page(inode);
+       return jbd2_journal_blocks_per_page(inode);
 }
 
 static inline int ext4_journal_force_commit(journal_t *journal)
 {
-       return journal_force_commit(journal);
+       return jbd2_journal_force_commit(journal);
 }
 
 /* super.c */
 
 /*
- * linux/include/linux/jbd.h
+ * linux/include/linux/jbd2.h
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>
  *
 /* Allow this file to be included directly into e2fsprogs */
 #ifndef __KERNEL__
 #include "jfs_compat.h"
-#define JFS_DEBUG
+#define JBD2_DEBUG
 #define jfs_debug jbd_debug
 #else
 
  * CONFIG_JBD_DEBUG is on.
  */
 #define JBD_EXPENSIVE_CHECKING
-extern int journal_enable_debug;
+extern int jbd2_journal_enable_debug;
 
 #define jbd_debug(n, f, a...)                                          \
        do {                                                            \
-               if ((n) <= journal_enable_debug) {                      \
+               if ((n) <= jbd2_journal_enable_debug) {                 \
                        printk (KERN_DEBUG "(%s, %d): %s: ",            \
                                __FILE__, __LINE__, __FUNCTION__);      \
                        printk (f, ## a);                               \
 #define jbd_debug(f, a...)     /**/
 #endif
 
-extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd_slab_alloc(size_t size, gfp_t flags);
-extern void jbd_slab_free(void *ptr, size_t size);
+extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
+extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
+extern void jbd2_slab_free(void *ptr, size_t size);
 
 #define jbd_kmalloc(size, flags) \
-       __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
+       __jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
 #define jbd_rep_kmalloc(size, flags) \
-       __jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
+       __jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
 
-#define JFS_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_MIN_JOURNAL_BLOCKS 1024
 
 #ifdef __KERNEL__
 
  * Internal structures used by the logging mechanism:
  */
 
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
+#define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
 
 /*
  * On-disk structures
  * Descriptor block types:
  */
 
-#define JFS_DESCRIPTOR_BLOCK   1
-#define JFS_COMMIT_BLOCK       2
-#define JFS_SUPERBLOCK_V1      3
-#define JFS_SUPERBLOCK_V2      4
-#define JFS_REVOKE_BLOCK       5
+#define JBD2_DESCRIPTOR_BLOCK  1
+#define JBD2_COMMIT_BLOCK      2
+#define JBD2_SUPERBLOCK_V1     3
+#define JBD2_SUPERBLOCK_V2     4
+#define JBD2_REVOKE_BLOCK      5
 
 /*
  * Standard header for all descriptor blocks:
  * The revoke descriptor: used on disk to describe a series of blocks to
  * be revoked from the log
  */
-typedef struct journal_revoke_header_s
+typedef struct jbd2_journal_revoke_header_s
 {
        journal_header_t r_header;
        __be32           r_count;       /* Count of bytes used in the block */
-} journal_revoke_header_t;
+} jbd2_journal_revoke_header_t;
 
 
 /* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE                1       /* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID     2       /* block has same uuid as previous */
-#define JFS_FLAG_DELETED       4       /* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG      8       /* last tag in this descriptor block */
+#define JBD2_FLAG_ESCAPE               1       /* on-disk block is escaped */
+#define JBD2_FLAG_SAME_UUID    2       /* block has same uuid as previous */
+#define JBD2_FLAG_DELETED      4       /* block deleted by this transaction */
+#define JBD2_FLAG_LAST_TAG     8       /* last tag in this descriptor block */
 
 
 /*
        __be32  s_start;                /* blocknr of start of log */
 
 /* 0x0020 */
-       /* Error value, as set by journal_abort(). */
+       /* Error value, as set by jbd2_journal_abort(). */
        __be32  s_errno;
 
 /* 0x0024 */
 /* 0x0400 */
 } journal_superblock_t;
 
-#define JFS_HAS_COMPAT_FEATURE(j,mask)                                 \
+#define JBD2_HAS_COMPAT_FEATURE(j,mask)                                        \
        ((j)->j_format_version >= 2 &&                                  \
         ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask)                              \
+#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask)                             \
        ((j)->j_format_version >= 2 &&                                  \
         ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask)                               \
+#define JBD2_HAS_INCOMPAT_FEATURE(j,mask)                              \
        ((j)->j_format_version >= 2 &&                                  \
         ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
 
-#define JFS_FEATURE_INCOMPAT_REVOKE    0x00000001
+#define JBD2_FEATURE_INCOMPAT_REVOKE   0x00000001
 
 /* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES      0
-#define JFS_KNOWN_ROCOMPAT_FEATURES    0
-#define JFS_KNOWN_INCOMPAT_FEATURES    JFS_FEATURE_INCOMPAT_REVOKE
+#define JBD2_KNOWN_COMPAT_FEATURES     0
+#define JBD2_KNOWN_ROCOMPAT_FEATURES   0
+#define JBD2_KNOWN_INCOMPAT_FEATURES   JBD2_FEATURE_INCOMPAT_REVOKE
 
 #ifdef __KERNEL__
 
        bit_spin_unlock(BH_JournalHead, &bh->b_state);
 }
 
-struct jbd_revoke_table_s;
+struct jbd2_revoke_table_s;
 
 /**
  * struct handle_s - The handle_s type is the concrete type associated with
 
        /*
         * Transaction's current state
-        * [no locking - only kjournald alters this]
+        * [no locking - only kjournald2 alters this]
         * FIXME: needs barriers
         * KLUDGE: [use j_state_lock]
         */
  * @j_revoke: The revoke table - maintains the list of revoked blocks in the
  *     current transaction.
  * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
+ * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
  * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
  *     number that will fit in j_blocksize
  * @j_last_sync_writer: most recent pid which did a synchronous write
         * current transaction.  [j_revoke_lock]
         */
        spinlock_t              j_revoke_lock;
-       struct jbd_revoke_table_s *j_revoke;
-       struct jbd_revoke_table_s *j_revoke_table[2];
+       struct jbd2_revoke_table_s *j_revoke;
+       struct jbd2_revoke_table_s *j_revoke_table[2];
 
        /*
-        * array of bhs for journal_commit_transaction
+        * array of bhs for jbd2_journal_commit_transaction
         */
        struct buffer_head      **j_wbuf;
        int                     j_wbufsize;
 /*
  * Journal flag definitions
  */
-#define JFS_UNMOUNT    0x001   /* Journal thread is being destroyed */
-#define JFS_ABORT      0x002   /* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR    0x004   /* The errno in the sb has been acked */
-#define JFS_FLUSHED    0x008   /* The journal superblock has been flushed */
-#define JFS_LOADED     0x010   /* The journal superblock has been loaded */
-#define JFS_BARRIER    0x020   /* Use IDE barriers */
+#define JBD2_UNMOUNT   0x001   /* Journal thread is being destroyed */
+#define JBD2_ABORT     0x002   /* Journaling has been aborted for errors. */
+#define JBD2_ACK_ERR   0x004   /* The errno in the sb has been acked */
+#define JBD2_FLUSHED   0x008   /* The journal superblock has been flushed */
+#define JBD2_LOADED    0x010   /* The journal superblock has been loaded */
+#define JBD2_BARRIER   0x020   /* Use IDE barriers */
 
 /*
  * Function declarations for the journaling transaction and buffer
  */
 
 /* Filing buffers */
-extern void __journal_temp_unlink_buffer(struct journal_head *jh);
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
+extern void __jbd2_journal_unfile_buffer(struct journal_head *);
+extern void __jbd2_journal_refile_buffer(struct journal_head *);
+extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
+extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
 extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
 extern void __journal_clean_data_list(transaction_t *transaction);
 
 /* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned long *);
+extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
+int jbd2_journal_next_log_block(journal_t *, unsigned long *);
 
 /* Commit management */
-extern void journal_commit_transaction(journal_t *);
+extern void jbd2_journal_commit_transaction(journal_t *);
 
 /* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
+int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
 /* Buffer IO */
 extern int
-journal_write_metadata_buffer(transaction_t      *transaction,
+jbd2_journal_write_metadata_buffer(transaction_t         *transaction,
                              struct journal_head  *jh_in,
                              struct journal_head **jh_out,
                              unsigned long        blocknr);
  * Register buffer modifications against the current transaction.
  */
 
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int      journal_restart (handle_t *, int nblocks);
-extern int      journal_extend (handle_t *, int nblocks);
-extern int      journal_get_write_access(handle_t *, struct buffer_head *);
-extern int      journal_get_create_access (handle_t *, struct buffer_head *);
-extern int      journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int      journal_dirty_data (handle_t *, struct buffer_head *);
-extern int      journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void     journal_release_buffer (handle_t *, struct buffer_head *);
-extern int      journal_forget (handle_t *, struct buffer_head *);
+extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
+extern int      jbd2_journal_restart (handle_t *, int nblocks);
+extern int      jbd2_journal_extend (handle_t *, int nblocks);
+extern int      jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
+extern int      jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
+extern int      jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
+extern int      jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
+extern int      jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
+extern void     jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
+extern int      jbd2_journal_forget (handle_t *, struct buffer_head *);
 extern void     journal_sync_buffer (struct buffer_head *);
-extern void     journal_invalidatepage(journal_t *,
+extern void     jbd2_journal_invalidatepage(journal_t *,
                                struct page *, unsigned long);
-extern int      journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int      journal_stop(handle_t *);
-extern int      journal_flush (journal_t *);
-extern void     journal_lock_updates (journal_t *);
-extern void     journal_unlock_updates (journal_t *);
+extern int      jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern int      jbd2_journal_stop(handle_t *);
+extern int      jbd2_journal_flush (journal_t *);
+extern void     jbd2_journal_lock_updates (journal_t *);
+extern void     jbd2_journal_unlock_updates (journal_t *);
 
-extern journal_t * journal_init_dev(struct block_device *bdev,
+extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
                                struct block_device *fs_dev,
                                int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int        journal_update_format (journal_t *);
-extern int        journal_check_used_features
+extern journal_t * jbd2_journal_init_inode (struct inode *);
+extern int        jbd2_journal_update_format (journal_t *);
+extern int        jbd2_journal_check_used_features
                   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int        journal_check_available_features
+extern int        jbd2_journal_check_available_features
                   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int        journal_set_features
+extern int        jbd2_journal_set_features
                   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int        journal_create     (journal_t *);
-extern int        journal_load       (journal_t *journal);
-extern void       journal_destroy    (journal_t *);
-extern int        journal_recover    (journal_t *journal);
-extern int        journal_wipe       (journal_t *, int);
-extern int        journal_skip_recovery        (journal_t *);
-extern void       journal_update_superblock    (journal_t *, int);
-extern void       __journal_abort_hard (journal_t *);
-extern void       journal_abort      (journal_t *, int);
-extern int        journal_errno      (journal_t *);
-extern void       journal_ack_err    (journal_t *);
-extern int        journal_clear_err  (journal_t *);
-extern int        journal_bmap(journal_t *, unsigned long, unsigned long *);
-extern int        journal_force_commit(journal_t *);
+extern int        jbd2_journal_create     (journal_t *);
+extern int        jbd2_journal_load       (journal_t *journal);
+extern void       jbd2_journal_destroy    (journal_t *);
+extern int        jbd2_journal_recover    (journal_t *journal);
+extern int        jbd2_journal_wipe       (journal_t *, int);
+extern int        jbd2_journal_skip_recovery   (journal_t *);
+extern void       jbd2_journal_update_superblock       (journal_t *, int);
+extern void       __jbd2_journal_abort_hard    (journal_t *);
+extern void       jbd2_journal_abort      (journal_t *, int);
+extern int        jbd2_journal_errno      (journal_t *);
+extern void       jbd2_journal_ack_err    (journal_t *);
+extern int        jbd2_journal_clear_err  (journal_t *);
+extern int        jbd2_journal_bmap(journal_t *, unsigned long, unsigned long *);
+extern int        jbd2_journal_force_commit(journal_t *);
 
 /*
  * journal_head management
  */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_remove_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
+struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
+struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
+void jbd2_journal_remove_journal_head(struct buffer_head *bh);
+void jbd2_journal_put_journal_head(struct journal_head *jh);
 
 /*
  * handle management
  */
-extern kmem_cache_t *jbd_handle_cache;
+extern kmem_cache_t *jbd2_handle_cache;
 
 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
 {
-       return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
+       return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
 }
 
 static inline void jbd_free_handle(handle_t *handle)
 {
-       kmem_cache_free(jbd_handle_cache, handle);
+       kmem_cache_free(jbd2_handle_cache, handle);
 }
 
 /* Primary revoke support */
 #define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int        journal_init_revoke(journal_t *, int);
-extern void       journal_destroy_revoke_caches(void);
-extern int        journal_init_revoke_caches(void);
+extern int        jbd2_journal_init_revoke(journal_t *, int);
+extern void       jbd2_journal_destroy_revoke_caches(void);
+extern int        jbd2_journal_init_revoke_caches(void);
 
-extern void       journal_destroy_revoke(journal_t *);
-extern int        journal_revoke (handle_t *,
+extern void       jbd2_journal_destroy_revoke(journal_t *);
+extern int        jbd2_journal_revoke (handle_t *,
                                unsigned long, struct buffer_head *);
-extern int        journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void       journal_write_revoke_records(journal_t *, transaction_t *);
+extern int        jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
+extern void       jbd2_journal_write_revoke_records(journal_t *, transaction_t *);
 
 /* Recovery revoke support */
-extern int     journal_set_revoke(journal_t *, unsigned long, tid_t);
-extern int     journal_test_revoke(journal_t *, unsigned long, tid_t);
-extern void    journal_clear_revoke(journal_t *);
-extern void    journal_switch_revoke_table(journal_t *journal);
+extern int     jbd2_journal_set_revoke(journal_t *, unsigned long, tid_t);
+extern int     jbd2_journal_test_revoke(journal_t *, unsigned long, tid_t);
+extern void    jbd2_journal_clear_revoke(journal_t *);
+extern void    jbd2_journal_switch_revoke_table(journal_t *journal);
 
 /*
  * The log thread user interface:
  * transitions on demand.
  */
 
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
+int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
+int jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+int jbd2_journal_force_commit_nested(journal_t *journal);
+int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_log_do_checkpoint(journal_t *journal);
 
-void __log_wait_for_space(journal_t *journal);
-extern void    __journal_drop_transaction(journal_t *, transaction_t *);
-extern int     cleanup_journal_tail(journal_t *);
+void __jbd2_log_wait_for_space(journal_t *journal);
+extern void    __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
+extern int     jbd2_cleanup_journal_tail(journal_t *);
 
 /* Debugging code only: */
 
 /*
  * is_journal_abort
  *
- * Simple test wrapper function to test the JFS_ABORT state flag.  This
+ * Simple test wrapper function to test the JBD2_ABORT state flag.  This
  * bit, when set, indicates that we have had a fatal error somewhere,
  * either inside the journaling layer or indicated to us by the client
  * (eg. ext3), and that we and should not commit any further
 
 static inline int is_journal_aborted(journal_t *journal)
 {
-       return journal->j_flags & JFS_ABORT;
+       return journal->j_flags & JBD2_ABORT;
 }
 
 static inline int is_handle_aborted(handle_t *handle)
        return is_journal_aborted(handle->h_transaction->t_journal);
 }
 
-static inline void journal_abort_handle(handle_t *handle)
+static inline void jbd2_journal_abort_handle(handle_t *handle)
 {
        handle->h_aborted = 1;
 }
        return (difference >= 0);
 }
 
-extern int journal_blocks_per_page(struct inode *inode);
+extern int jbd2_journal_blocks_per_page(struct inode *inode);
 
 /*
  * Return the minimum number of blocks which must be free in the journal